Example usage of org.apache.solr.client.solrj.embedded.JettySolrRunner in the Apache lucene-solr project:
class MiniSolrCloudCluster, method startJettySolrRunner.
/**
 * Starts an additional Solr instance serving the given servlet context.
 *
 * @param name the instance name; used to create the instance directory
 * @param hostContext the servlet context path the instance should serve
 * @param config a base {@link JettyConfig} for the instance's
 *        {@link org.apache.solr.client.solrj.embedded.JettySolrRunner};
 *        its context is overridden with {@code hostContext} before start
 *
 * @return the started {@link JettySolrRunner}
 */
public JettySolrRunner startJettySolrRunner(String name, String hostContext, JettyConfig config) throws Exception {
  final Path instanceDir = createInstancePath(name);
  // Normalize the caller-supplied context and rebuild the config around it.
  final JettyConfig effectiveConfig = JettyConfig.builder(config)
      .setContext(getHostContextSuitableForServletContext(hostContext))
      .build();
  final JettySolrRunner runner = new JettySolrRunner(instanceDir.toString(), effectiveConfig);
  runner.start();
  // Track the runner so the cluster can manage/shut it down later.
  jettys.add(runner);
  return runner;
}
Example usage of org.apache.solr.client.solrj.embedded.JettySolrRunner in the Apache lucene-solr project:
class TestTrackingShardHandlerFactory, method testRequestTracking.
@Test
@BaseDistributedSearchTestCase.ShardsFixed(num = 2)
public void testRequestTracking() throws Exception {
  String collectionName = "testTwoPhase";
  List<JettySolrRunner> runners = new ArrayList<>(jettys);
  runners.add(controlJetty);

  TrackingShardHandlerFactory.RequestTrackingQueue trackingQueue = new TrackingShardHandlerFactory.RequestTrackingQueue();
  TrackingShardHandlerFactory.setTrackingQueue(runners, trackingQueue);

  // Every node (including the control node) must now share the same tracking queue.
  for (JettySolrRunner runner : runners) {
    CoreContainer container = runner.getCoreContainer();
    ShardHandlerFactory factory = container.getShardHandlerFactory();
    // assertTrue instead of the bare 'assert' keyword: a plain assert is
    // silently skipped when the JVM runs without -ea, hiding a misconfigured test.
    assertTrue("Expected a TrackingShardHandlerFactory but found " + factory.getClass().getName(),
        factory instanceof TrackingShardHandlerFactory);
    TrackingShardHandlerFactory trackingShardHandlerFactory = (TrackingShardHandlerFactory) factory;
    assertSame(trackingQueue, trackingShardHandlerFactory.getTrackingQueue());
  }

  createCollection(collectionName, 2, 1, 1);
  waitForRecoveriesToFinish(collectionName, true);

  // Collection creation issues one core admin request per replica (2 shards x 1 replica).
  List<TrackingShardHandlerFactory.ShardRequestAndParams> coreAdminRequests = trackingQueue.getCoreAdminRequests();
  assertNotNull(coreAdminRequests);
  assertEquals("Unexpected number of core admin requests were found", 2, coreAdminRequests.size());

  CloudSolrClient client = cloudClient;
  client.setDefaultCollection(collectionName);

  /*
   hash of b is 95de7e03 high bits=2 shard=shard1
   hash of e is 656c4367 high bits=1 shard=shard2
   */
  // Route half the docs to each shard via composite-id prefixes so both shards
  // participate in the distributed query below.
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", (i % 2 == 0 ? "b!" : "e!") + i);
    doc.addField("a_i", i);
    doc.addField("a_t", "text_" + i);
    client.add(doc);
  }
  client.commit();

  client.query(new SolrQuery("*:*"));

  // A distributed query hits each shard twice: once for top ids, once for stored fields.
  TrackingShardHandlerFactory.ShardRequestAndParams getTopIdsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard1", ShardRequest.PURPOSE_GET_TOP_IDS);
  assertNotNull(getTopIdsRequest);
  getTopIdsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard2", ShardRequest.PURPOSE_GET_TOP_IDS);
  assertNotNull(getTopIdsRequest);

  TrackingShardHandlerFactory.ShardRequestAndParams getFieldsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard1", ShardRequest.PURPOSE_GET_FIELDS);
  assertNotNull(getFieldsRequest);
  getFieldsRequest = trackingQueue.getShardRequestByPurpose(client.getZkStateReader(), collectionName, "shard2", ShardRequest.PURPOSE_GET_FIELDS);
  assertNotNull(getFieldsRequest);

  // 4 shard requests + 2 core admin requests (invoked by create collection API)
  assertEquals("Total number of requests do not match expected", 6, totalTrackedRequests(trackingQueue));

  // reset: detach the queue so no further requests are recorded
  TrackingShardHandlerFactory.setTrackingQueue(runners, null);
  for (JettySolrRunner runner : runners) {
    CoreContainer container = runner.getCoreContainer();
    ShardHandlerFactory factory = container.getShardHandlerFactory();
    assertTrue("Expected a TrackingShardHandlerFactory but found " + factory.getClass().getName(),
        factory instanceof TrackingShardHandlerFactory);
    TrackingShardHandlerFactory trackingShardHandlerFactory = (TrackingShardHandlerFactory) factory;
    assertFalse(trackingShardHandlerFactory.isTracking());
  }

  // make another request and verify nothing new was recorded — should still be 6
  client.query(new SolrQuery("*:*"));
  assertEquals("Total number of shard requests do not match expected", 6, totalTrackedRequests(trackingQueue));
}

/**
 * Sums the sizes of all per-shard request lists recorded in the given tracking
 * queue. Extracted because the original test duplicated this counting loop.
 */
private static int totalTrackedRequests(TrackingShardHandlerFactory.RequestTrackingQueue queue) {
  int total = 0;
  for (List<TrackingShardHandlerFactory.ShardRequestAndParams> requests : queue.getAllRequests().values()) {
    total += requests.size();
  }
  return total;
}
Example usage of org.apache.solr.client.solrj.embedded.JettySolrRunner in the Apache lucene-solr project:
class SolrShardReporterTest, method test.
@Test
public void test() throws Exception {
  // Wait until both collections are fully recovered before sampling metrics;
  // the ZK state reader from the first jetty serves as the cluster view.
  waitForRecoveriesToFinish("control_collection", jettys.get(0).getCoreContainer().getZkController().getZkStateReader(), false);
  waitForRecoveriesToFinish("collection1", jettys.get(0).getCoreContainer().getZkController().getZkStateReader(), false);
  printLayout();
  // wait for at least two reports
  Thread.sleep(10000);
  ClusterState state = jettys.get(0).getCoreContainer().getZkController().getClusterState();
  // Walk every cloud core on every node and verify its metric registries.
  for (JettySolrRunner jetty : jettys) {
    CoreContainer cc = jetty.getCoreContainer();
    SolrMetricManager metricManager = cc.getMetricManager();
    for (final String coreName : cc.getLoadedCoreNames()) {
      CoreDescriptor cd = cc.getCoreDescriptor(coreName);
      if (cd.getCloudDescriptor() == null) {
        // not a cloud collection
        continue;
      }
      CloudDescriptor cloudDesc = cd.getCloudDescriptor();
      DocCollection docCollection = state.getCollection(cloudDesc.getCollectionName());
      // Prefer the replica name parsed from the core name; fall back to the
      // core node name when parsing fails.
      String replicaName = SolrCoreMetricManager.parseReplicaName(cloudDesc.getCollectionName(), coreName);
      if (replicaName == null) {
        replicaName = cloudDesc.getCoreNodeName();
      }
      String registryName = SolrCoreMetricManager.createRegistryName(true, cloudDesc.getCollectionName(), cloudDesc.getShardId(), replicaName, null);
      String leaderRegistryName = SolrCoreMetricManager.createLeaderRegistryName(true, cloudDesc.getCollectionName(), cloudDesc.getShardId());
      boolean leader = cloudDesc.isLeader();
      Slice slice = docCollection.getSlice(cloudDesc.getShardId());
      int numReplicas = slice.getReplicas().size();
      if (leader) {
        // The leader aggregates reports from every replica, so each aggregate
        // metric (except peerSync, which not all replicas report) must have
        // one contribution per replica.
        assertTrue(metricManager.registryNames() + " doesn't contain " + leaderRegistryName, metricManager.registryNames().contains(leaderRegistryName));
        Map<String, Metric> metrics = metricManager.registry(leaderRegistryName).getMetrics();
        metrics.forEach((k, v) -> {
          assertTrue("Unexpected type of " + k + ": " + v.getClass().getName() + ", " + v, v instanceof AggregateMetric);
          AggregateMetric am = (AggregateMetric) v;
          if (!k.startsWith("REPLICATION.peerSync")) {
            assertEquals(coreName + "::" + registryName + "::" + k + ": " + am.toString(), numReplicas, am.size());
          }
        });
      } else {
        // A non-leader node must not host the leader registry.
        assertFalse(metricManager.registryNames() + " contains " + leaderRegistryName + " but it's not a leader!", metricManager.registryNames().contains(leaderRegistryName));
        // NOTE(review): this branch fetches registry(leaderRegistryName) right
        // after asserting it does not exist — registry() presumably creates an
        // empty registry here, which would make the forEach below vacuous.
        // Looks like it should inspect a different registry (or be removed);
        // confirm against upstream SolrShardReporterTest.
        Map<String, Metric> metrics = metricManager.registry(leaderRegistryName).getMetrics();
        metrics.forEach((k, v) -> {
          assertTrue("Unexpected type of " + k + ": " + v.getClass().getName() + ", " + v, v instanceof AggregateMetric);
          AggregateMetric am = (AggregateMetric) v;
          if (!k.startsWith("REPLICATION.peerSync")) {
            assertEquals(coreName + "::" + registryName + "::" + k + ": " + am.toString(), 1, am.size());
          }
        });
      }
      // Every cloud core, leader or not, must have its own replica registry.
      assertTrue(metricManager.registryNames() + " doesn't contain " + registryName, metricManager.registryNames().contains(registryName));
    }
  }
  // The overseer/control node reports into the cluster-level registry.
  SolrMetricManager metricManager = controlJetty.getCoreContainer().getMetricManager();
  assertTrue(metricManager.registryNames().contains("solr.cluster"));
}
Example usage of org.apache.solr.client.solrj.embedded.JettySolrRunner in the Apache lucene-solr project:
class CleanupOldIndexTest, method test.
@Test
public void test() throws Exception {
  CollectionAdminRequest.createCollection(COLLECTION, "conf1", 1, 2).processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
  // TODO make this configurable on StoppableIndexingThread
  cluster.getSolrClient().setDefaultCollection(COLLECTION);

  int[] maxDocList = new int[] { 300, 500, 700 };
  // Fix: nextInt bound must be the array length, not length - 1, otherwise the
  // last option (700) could never be selected.
  int maxDoc = maxDocList[random().nextInt(maxDocList.length)];
  StoppableIndexingThread indexThread = new StoppableIndexingThread(null, cluster.getSolrClient(), "1", true, maxDoc, 1, true);
  indexThread.start();

  // give some time to index...
  int[] waitTimes = new int[] { 3000, 4000 };
  // Fix: with the old nextInt(waitTimes.length - 1) == nextInt(1) the wait was
  // always 3000ms; use the full array length so both waits are possible.
  Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);

  // create some "old" index directories
  JettySolrRunner jetty = cluster.getRandomJetty(random());
  CoreContainer coreContainer = jetty.getCoreContainer();
  File dataDir = null;
  try (SolrCore solrCore = coreContainer.getCore(coreContainer.getCoreDescriptors().get(0).getName())) {
    dataDir = new File(solrCore.getDataDir());
  }
  assertTrue(dataDir.isDirectory());

  // Fix: the old value (60 * 60 * 24L) was seconds per day, not milliseconds;
  // the constant only needs to yield two distinct old timestamps, but its name
  // should not lie about the unit.
  long msInDay = 1000L * 60 * 60 * 24;
  String timestamp1 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date(1 * msInDay));
  String timestamp2 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date(2 * msInDay));
  File oldIndexDir1 = new File(dataDir, "index." + timestamp1);
  FileUtils.forceMkdir(oldIndexDir1);
  File oldIndexDir2 = new File(dataDir, "index." + timestamp2);
  FileUtils.forceMkdir(oldIndexDir2);

  // verify the "old" index directories exist
  assertTrue(oldIndexDir1.isDirectory());
  assertTrue(oldIndexDir2.isDirectory());

  // bring shard replica down
  ChaosMonkey.stop(jetty);
  // wait a moment - lets allow some docs to be indexed so replication time is non 0
  Thread.sleep(waitTimes[random().nextInt(waitTimes.length)]);
  // bring shard replica up
  ChaosMonkey.start(jetty);
  // make sure replication can start
  Thread.sleep(3000);

  // stop indexing threads
  indexThread.safeStop();
  indexThread.join();

  cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS, (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));

  // recovery should have cleaned up the stale index directories
  assertFalse(oldIndexDir1.isDirectory());
  assertFalse(oldIndexDir2.isDirectory());
}
Example usage of org.apache.solr.client.solrj.embedded.JettySolrRunner in the Apache lucene-solr project:
class CollectionTooManyReplicasTest, method testDownedShards.
/**
 * Verifies that ADDREPLICA and CREATESHARD requests targeting a stopped node
 * are rejected with a clear error message rather than being silently accepted.
 */
@Test
public void testDownedShards() throws Exception {
  final String collectionName = "TooManyReplicasWhenAddingDownedNode";
  CollectionAdminRequest
      .createCollectionWithImplicitRouter(collectionName, "conf", "shardstart", 1)
      .setMaxShardsPerNode(2)
      .process(cluster.getSolrClient());

  // Shut down a Jetty, I really don't care which
  final JettySolrRunner victim = cluster.getRandomJetty(random());
  final String deadNode = victim.getBaseUrl().toString();
  cluster.stopJettySolrRunner(victim);

  final String expectedFragment = "At least one of the node(s) specified are not currently active, no action taken.";
  try {
    // Adding a replica on a dead node should fail
    final Exception addReplicaFailure = expectThrows(Exception.class, () ->
        CollectionAdminRequest.addReplicaToShard(collectionName, "shardstart")
            .setNode(deadNode)
            .process(cluster.getSolrClient()));
    assertTrue("Should have gotten a message about shard not ",
        addReplicaFailure.getMessage().contains(expectedFragment));

    // Should also die if we just add a shard
    final Exception createShardFailure = expectThrows(Exception.class, () ->
        CollectionAdminRequest.createShard(collectionName, "shard1")
            .setNodeSet(deadNode)
            .process(cluster.getSolrClient()));
    assertTrue("Should have gotten a message about shard not ",
        createShardFailure.getMessage().contains(expectedFragment));
  } finally {
    // Restore the node so later tests see a healthy cluster.
    cluster.startJettySolrRunner(victim);
  }
}
Aggregations