Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class ShardRoutingCustomTest, the method doCustomSharding:
private void doCustomSharding() throws Exception {
  printLayout();
  int totalReplicas = getTotalReplicas(collection);

  File jettyDir = createTempDir("jetty").toFile();
  jettyDir.mkdirs();
  setupJettySolrHome(jettyDir);
  JettySolrRunner j = createJetty(jettyDir, createTempDir().toFile().getAbsolutePath(), "shardA", "solrconfig.xml", null);
  jettys.add(j);
  SolrClient client = createNewSolrClient(j.getLocalPort());
  clients.add(client);

  // Poll the cluster state (up to ~30 seconds) until the replica count changes,
  // which means the new jetty has registered itself.
  int retries = 60;
  while (--retries >= 0) {
    // total replicas changed; assume it was the jetty we just added
    if (getTotalReplicas(collection) != totalReplicas) {
      break;
    }
    Thread.sleep(500);
  }
  if (retries < 0) {
    // dump the layout before failing, so the broken state is visible in the logs
    printLayout();
    fail("Timeout waiting for " + j + " to appear in clusterstate");
  }

  updateMappingsFromZk(this.jettys, this.clients);
  printLayout();
}
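For context, here is a minimal standalone sketch of the JettySolrRunner lifecycle that the createJetty(...) helper above wraps. The (solrHome, context, port) constructor is an assumption; signatures vary across lucene-solr versions, and the solr home path is illustrative.

// Sketch only: boot an embedded Jetty hosting Solr, read the dynamically
// assigned port, then shut the server down.
JettySolrRunner runner = new JettySolrRunner("/path/to/solr/home", "/solr", 0); // port 0: let Jetty pick a free port
runner.start();
int port = runner.getLocalPort(); // only meaningful once the server is running
// ... point a SolrClient at http://localhost:<port>/solr and issue requests ...
runner.stop();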
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class HdfsCollectionsAPIDistributedZkTest, the method moveReplicaTest:
@Test
public void moveReplicaTest() throws Exception {
  cluster.waitForAllNodes(5000);
  String coll = "movereplicatest_coll";
  CloudSolrClient cloudClient = cluster.getSolrClient();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
  create.setMaxShardsPerNode(2);
  cloudClient.request(create);
  for (int i = 0; i < 10; i++) {
    cloudClient.add(coll, sdoc("id", String.valueOf(i)));
    cloudClient.commit(coll);
  }

  // Pick a random slice and a non-leader replica from it.
  List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
  Collections.shuffle(slices, random());
  Slice slice = null;
  Replica replica = null;
  for (Slice s : slices) {
    slice = s;
    for (Replica r : s.getReplicas()) {
      if (s.getLeader() != r) {
        replica = r;
      }
    }
  }
  String dataDir = getDataDir(replica);

  // Pick a random live node other than the one currently hosting the replica.
  Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
  ArrayList<String> l = new ArrayList<>(liveNodes);
  Collections.shuffle(l, random());
  String targetNode = null;
  for (String node : l) {
    if (!replica.getNodeName().equals(node)) {
      targetNode = node;
      break;
    }
  }
  assertNotNull(targetNode);

  CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
  moveReplica.process(cloudClient);

  // MOVEREPLICA should have removed the core from the source node and created one on the target.
  checkNumOfCores(cloudClient, replica.getNodeName(), 0);
  checkNumOfCores(cloudClient, targetNode, 2);
  waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));

  // On HDFS the moved replica should reuse the original data directory.
  slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
  boolean found = false;
  for (Replica newReplica : slice.getReplicas()) {
    if (getDataDir(newReplica).equals(dataDir)) {
      found = true;
    }
  }
  assertTrue(found);

  // data dir is reused, so replication will be skipped
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
      if (counter != null) {
        assertEquals(0, counter.getCount());
      }
    }
  }
}
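The checkNumOfCores(...) helper is not shown in this excerpt. A plausible sketch of it follows; the method name matches the call sites above, but the body, and the use of CoreAdminRequest.getStatus against the node's base URL, are assumptions.

// Hypothetical helper: resolve the node's base URL from ZooKeeper, then
// count the cores its core admin API reports.
private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws Exception {
  String baseUrl = cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName);
  try (HttpSolrClient coreClient = new HttpSolrClient.Builder(baseUrl).build()) {
    CoreAdminResponse status = CoreAdminRequest.getStatus(null, coreClient); // null name: status of all cores
    assertEquals(expectedCores, status.getCoreStatus().size());
  }
}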
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class HdfsWriteToMultipleCollectionsTest, the method test:
@Test
public void test() throws Exception {
  int docCount = random().nextInt(1313) + 1;
  int cnt = random().nextInt(4) + 1;
  for (int i = 0; i < cnt; i++) {
    createCollection(ACOLLECTION + i, 2, 2, 9);
  }
  for (int i = 0; i < cnt; i++) {
    waitForRecoveriesToFinish(ACOLLECTION + i, false);
  }

  // Index into every collection concurrently, one client and one indexing thread per collection.
  List<CloudSolrClient> cloudClients = new ArrayList<>();
  List<StoppableIndexingThread> threads = new ArrayList<>();
  for (int i = 0; i < cnt; i++) {
    CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress());
    client.setDefaultCollection(ACOLLECTION + i);
    cloudClients.add(client);
    StoppableIndexingThread indexThread = new StoppableIndexingThread(null, client, "1", true, docCount, 1, true);
    threads.add(indexThread);
    indexThread.start();
  }

  int addCnt = 0;
  for (StoppableIndexingThread thread : threads) {
    thread.join();
    addCnt += thread.getNumAdds() - thread.getNumDeletes();
  }

  long collectionsCount = 0;
  for (CloudSolrClient client : cloudClients) {
    client.commit();
    collectionsCount += client.query(new SolrQuery("*:*")).getResults().getNumFound();
  }
  IOUtils.close(cloudClients);
  assertEquals(addCnt, collectionsCount);

  BlockCache lastBlockCache = null;
  // assert that we are using the block directory, that read caching is enabled,
  // and that write caching is disabled (see SOLR-6424 below)
  for (JettySolrRunner jetty : jettys) {
    CoreContainer cores = jetty.getCoreContainer();
    Collection<SolrCore> solrCores = cores.getCores();
    for (SolrCore core : solrCores) {
      if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName().startsWith(ACOLLECTION)) {
        DirectoryFactory factory = core.getDirectoryFactory();
        assertTrue("Found: " + core.getDirectoryFactory().getClass().getName(), factory instanceof HdfsDirectoryFactory);
        Directory dir = factory.get(core.getDataDir(), null, null);
        try {
          // The directory factory's size must agree with what HDFS reports for the data dir.
          long dataDirSize = factory.size(dir);
          try (FileSystem fileSystem = FileSystem.newInstance(new Path(core.getDataDir()).toUri(), new Configuration())) {
            long size = fileSystem.getContentSummary(new Path(core.getDataDir())).getLength();
            assertEquals(size, dataDirSize);
          }
        } finally {
          core.getDirectoryFactory().release(dir);
        }

        RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
        try {
          IndexWriter iw = iwRef.get();
          NRTCachingDirectory directory = (NRTCachingDirectory) iw.getDirectory();
          BlockDirectory blockDirectory = (BlockDirectory) directory.getDelegate();
          assertTrue(blockDirectory.isBlockCacheReadEnabled());
          // see SOLR-6424
          assertFalse(blockDirectory.isBlockCacheWriteEnabled());
          Cache cache = blockDirectory.getCache();
          // we know it's a BlockDirectoryCache, but future proof
          assertTrue(cache instanceof BlockDirectoryCache);
          BlockCache blockCache = ((BlockDirectoryCache) cache).getBlockCache();
          if (lastBlockCache != null) {
            if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
              // global cache: every core must share the same BlockCache instance
              assertEquals(lastBlockCache, blockCache);
            } else {
              assertNotSame(lastBlockCache, blockCache);
            }
          }
          lastBlockCache = blockCache;
        } finally {
          iwRef.decref();
        }
      }
    }
  }
}
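The global-versus-per-core branch near the end is driven by a system property. Assuming SOLR_HDFS_BLOCKCACHE_GLOBAL resolves to "solr.hdfs.blockcache.global" (the constant defined in HdfsDirectoryFactory), a test run could force the shared-cache path like this:

// Sketch: force a single BlockCache shared by all cores, so the
// assertEquals(lastBlockCache, blockCache) branch above is taken.
System.setProperty("solr.hdfs.blockcache.global", "true");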
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class TestTolerantUpdateProcessorCloud, the method assertSpinLoopAllJettyAreRunning:
/**
 * HACK: Loops over every Jetty instance in the specified MiniSolrCloudCluster to check whether
 * it is running, and sleeps in small increments until they all report that they are, or a
 * maximum number of iterations is reached.
 *
 * (Work around for SOLR-8862. Maybe something like this should be promoted into
 * MiniSolrCloudCluster's start() method? Or SolrCloudTestCase's configureCluster?)
 */
public static void assertSpinLoopAllJettyAreRunning(MiniSolrCloudCluster cluster) throws InterruptedException {
  // NOTE: ideally we could use an ExecutorService that tried to open Sockets (with a long timeout)
  // to each of the jetty instances in parallel without any sleeping, but since they pick their
  // ports dynamically and don't report them until/unless the server is up, that won't necessarily
  // do us any good.
  final int numServers = cluster.getJettySolrRunners().size();
  int numRunning = 0;
  for (int i = 5; 0 <= i; i--) {
    numRunning = 0;
    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
      if (jetty.isRunning()) {
        numRunning++;
      }
    }
    if (numServers == numRunning) {
      return;
    } else if (0 == i) {
      // give up
      break;
    }
    // the more nodes we're waiting on, the longer we should try to sleep (within reason)
    Thread.sleep(Math.min((numServers - numRunning) * 100, 1000));
  }
  assertEquals("giving up waiting for all jetty instances to be running", numServers, numRunning);
}
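For illustration, the parallel socket-probe alternative that the NOTE rejects might look like the sketch below. It only works once every port is known, and JettySolrRunner guarantees that only after start(), which is exactly why the method polls isRunning() instead. Imports from java.io, java.net, and java.util.concurrent are assumed.

// Sketch of the rejected alternative: probe each jetty's port in parallel
// with a long connect timeout instead of sleeping and re-checking.
ExecutorService pool = Executors.newFixedThreadPool(cluster.getJettySolrRunners().size());
List<Future<Boolean>> probes = new ArrayList<>();
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
  final int port = jetty.getLocalPort(); // undefined until the server is actually up
  probes.add(pool.submit(() -> {
    try (Socket socket = new Socket()) {
      socket.connect(new InetSocketAddress("127.0.0.1", port), 30_000);
      return true;
    } catch (IOException e) {
      return false;
    }
  }));
}
for (Future<Boolean> probe : probes) {
  assertTrue(probe.get()); // fails if any node never opened its port
}
pool.shutdown();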
Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in project lucene-solr by apache.
From the class RulesTest, the method testHostFragmentRuleThrowsExceptionWhenIpDoesNotMatch:
@Test
public void testHostFragmentRuleThrowsExceptionWhenIpDoesNotMatch() throws Exception {
  String rulesColl = "ipRuleColl";

  JettySolrRunner jetty = cluster.getRandomJetty(random());
  String host = jetty.getBaseUrl().getHost();
  String[] ipFragments = host.split("\\.");
  String ip_1 = ipFragments[ipFragments.length - 1]; // last fragment of the IP
  String ip_2 = ipFragments[ipFragments.length - 2]; // second-to-last fragment

  expectedException.expect(HttpSolrClient.RemoteSolrException.class);
  expectedException.expectMessage(containsString("ip_1"));

  // Appending "9999" to the real ip_1 fragment guarantees the rule matches no
  // live node, so collection creation must fail.
  CollectionAdminRequest.createCollectionWithImplicitRouter(rulesColl, "conf", "shard1", 2)
      .setRule("ip_2:" + ip_2, "ip_1:" + ip_1 + "9999")
      .setSnitch("class:ImplicitSnitch")
      .process(cluster.getSolrClient());
}
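For contrast, a sketch of the positive case: rules built from the node's real ip_1/ip_2 fragments should let creation succeed, since ImplicitSnitch can satisfy them. The collection name is illustrative.

// Sketch: the same rule built from the node's actual IP fragments;
// no exception is expected here.
CollectionAdminRequest.createCollectionWithImplicitRouter("ipRuleCollOk", "conf", "shard1", 2)
    .setRule("ip_2:" + ip_2, "ip_1:" + ip_1)
    .setSnitch("class:ImplicitSnitch")
    .process(cluster.getSolrClient());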