
Example 41 with JettySolrRunner

Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in the lucene-solr project by Apache.

From the class ShardRoutingCustomTest, method doCustomSharding.

private void doCustomSharding() throws Exception {
    printLayout();
    int totalReplicas = getTotalReplicas(collection);
    File jettyDir = createTempDir("jetty").toFile();
    jettyDir.mkdirs();
    setupJettySolrHome(jettyDir);
    JettySolrRunner j = createJetty(jettyDir, createTempDir().toFile().getAbsolutePath(), "shardA", "solrconfig.xml", null);
    jettys.add(j);
    SolrClient client = createNewSolrClient(j.getLocalPort());
    clients.add(client);
    int retries = 60;
    while (--retries >= 0) {
        // total replicas changed.. assume it was us
        if (getTotalReplicas(collection) != totalReplicas) {
            break;
        }
        Thread.sleep(500);
    }
    if (retries < 0) {
        // retries only goes negative when the loop times out; print the layout
        // before failing, since a statement placed after fail() would never run
        printLayout();
        fail("Timeout waiting for " + j + " to appear in clusterstate");
    }
    updateMappingsFromZk(this.jettys, this.clients);
    printLayout();
}
Also used : SolrClient(org.apache.solr.client.solrj.SolrClient) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) File(java.io.File)
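
The createJetty(...) and createNewSolrClient(...) calls above are helpers from the distributed test base class. For readers who only need the runner itself, here is a minimal standalone sketch of the JettySolrRunner lifecycle; the (solrHome, context, port) constructor, port 0 for "pick a free port", and the "collection1" core name are assumptions made for illustration, not details taken from the test above.

private void smokeTestStandaloneJetty(File solrHome) throws Exception {
    // assumed constructor: (solr home, servlet context, port); port 0 lets Jetty pick a free port
    JettySolrRunner runner = new JettySolrRunner(solrHome.getAbsolutePath(), "/solr", 0);
    runner.start();
    try (HttpSolrClient client = new HttpSolrClient.Builder(runner.getBaseUrl().toString()).build()) {
        // hypothetical smoke query against a core the solr home is assumed to contain
        client.query("collection1", new SolrQuery("*:*"));
    } finally {
        runner.stop();
    }
}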

Example 42 with JettySolrRunner

Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in the lucene-solr project by Apache.

From the class HdfsCollectionsAPIDistributedZkTest, method moveReplicaTest.

@Test
public void moveReplicaTest() throws Exception {
    cluster.waitForAllNodes(5000);
    String coll = "movereplicatest_coll";
    CloudSolrClient cloudClient = cluster.getSolrClient();
    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
    create.setMaxShardsPerNode(2);
    cloudClient.request(create);
    for (int i = 0; i < 10; i++) {
        cloudClient.add(coll, sdoc("id", String.valueOf(i)));
        cloudClient.commit(coll);
    }
    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
    Collections.shuffle(slices, random());
    Slice slice = null;
    Replica replica = null;
    for (Slice s : slices) {
        slice = s;
        for (Replica r : s.getReplicas()) {
            if (s.getLeader() != r) {
                replica = r;
            }
        }
    }
    String dataDir = getDataDir(replica);
    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
    ArrayList<String> l = new ArrayList<>(liveNodes);
    Collections.shuffle(l, random());
    String targetNode = null;
    // iterate the shuffled copy so the target node is chosen at random
    for (String node : l) {
        if (!replica.getNodeName().equals(node)) {
            targetNode = node;
            break;
        }
    }
    assertNotNull(targetNode);
    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
    moveReplica.process(cloudClient);
    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
    checkNumOfCores(cloudClient, targetNode, 2);
    waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
    boolean found = false;
    for (Replica newReplica : slice.getReplicas()) {
        if (getDataDir(newReplica).equals(dataDir)) {
            found = true;
        }
    }
    assertTrue(found);
    // data dir is reused so replication will be skipped
    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
        SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
        List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
        for (String registry : registryNames) {
            Map<String, Metric> metrics = manager.registry(registry).getMetrics();
            Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
            if (counter != null) {
                assertEquals(0, counter.getCount());
            }
        }
    }
}
Also used : Nightly(com.carrotsearch.randomizedtesting.annotations.Nightly) BadHdfsThreadsFilter(org.apache.solr.util.BadHdfsThreadsFilter) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) BeforeClass(org.junit.BeforeClass) Slow(org.apache.lucene.util.LuceneTestCase.Slow) CoreAdminResponse(org.apache.solr.client.solrj.response.CoreAdminResponse) ArrayList(java.util.ArrayList) SolrServerException(org.apache.solr.client.solrj.SolrServerException) Map(java.util.Map) ThreadLeakFilters(com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters) Counter(com.codahale.metrics.Counter) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) CoreStatus(org.apache.solr.client.solrj.request.CoreStatus) AfterClass(org.junit.AfterClass) Slice(org.apache.solr.common.cloud.Slice) Set(java.util.Set) Metric(com.codahale.metrics.Metric) IOException(java.io.IOException) Test(org.junit.Test) Collectors(java.util.stream.Collectors) Replica(org.apache.solr.common.cloud.Replica) List(java.util.List) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) SolrMetricManager(org.apache.solr.metrics.SolrMetricManager) ZkConfigManager(org.apache.solr.common.cloud.ZkConfigManager) Collections(java.util.Collections) CoreAdminRequest(org.apache.solr.client.solrj.request.CoreAdminRequest) CollectionAdminRequest(org.apache.solr.client.solrj.request.CollectionAdminRequest) CollectionsAPIDistributedZkTest(org.apache.solr.cloud.CollectionsAPIDistributedZkTest)
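
The checkNumOfCores(...) helper called above is defined elsewhere in the test class and is not shown on this page. Below is a hedged sketch of what such a helper can look like, built only from SolrJ calls that already appear in this example's import list (HttpSolrClient, CoreAdminRequest, CoreAdminResponse); it is an illustrative reconstruction, not the project's actual helper.

// Illustrative sketch only; the real helper lives elsewhere in the test class.
private void checkNumOfCores(CloudSolrClient cloudClient, String nodeName, int expectedCores) throws IOException, SolrServerException {
    // resolve the node name to its base URL, then ask the CoreAdmin API for the status of all cores on that node
    String baseUrl = cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeName);
    try (HttpSolrClient client = new HttpSolrClient.Builder(baseUrl).build()) {
        CoreAdminResponse status = CoreAdminRequest.getStatus(null, client);
        assertEquals(nodeName + " has an unexpected number of cores", expectedCores, status.getCoreStatus().size());
    }
}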

Example 43 with JettySolrRunner

Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in the lucene-solr project by Apache.

From the class HdfsWriteToMultipleCollectionsTest, method test.

@Test
public void test() throws Exception {
    int docCount = random().nextInt(1313) + 1;
    int cnt = random().nextInt(4) + 1;
    for (int i = 0; i < cnt; i++) {
        createCollection(ACOLLECTION + i, 2, 2, 9);
    }
    for (int i = 0; i < cnt; i++) {
        waitForRecoveriesToFinish(ACOLLECTION + i, false);
    }
    List<CloudSolrClient> cloudClients = new ArrayList<>();
    List<StoppableIndexingThread> threads = new ArrayList<>();
    for (int i = 0; i < cnt; i++) {
        CloudSolrClient client = getCloudSolrClient(zkServer.getZkAddress());
        client.setDefaultCollection(ACOLLECTION + i);
        cloudClients.add(client);
        StoppableIndexingThread indexThread = new StoppableIndexingThread(null, client, "1", true, docCount, 1, true);
        threads.add(indexThread);
        indexThread.start();
    }
    int addCnt = 0;
    for (StoppableIndexingThread thread : threads) {
        thread.join();
        addCnt += thread.getNumAdds() - thread.getNumDeletes();
    }
    long collectionsCount = 0;
    for (CloudSolrClient client : cloudClients) {
        client.commit();
        collectionsCount += client.query(new SolrQuery("*:*")).getResults().getNumFound();
    }
    IOUtils.close(cloudClients);
    assertEquals(addCnt, collectionsCount);
    BlockCache lastBlockCache = null;
    // assert that we are using the block directory and that write and read caching are being used
    for (JettySolrRunner jetty : jettys) {
        CoreContainer cores = jetty.getCoreContainer();
        Collection<SolrCore> solrCores = cores.getCores();
        for (SolrCore core : solrCores) {
            if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName().startsWith(ACOLLECTION)) {
                DirectoryFactory factory = core.getDirectoryFactory();
                assertTrue("Found: " + core.getDirectoryFactory().getClass().getName(), factory instanceof HdfsDirectoryFactory);
                Directory dir = factory.get(core.getDataDir(), null, null);
                try {
                    long dataDirSize = factory.size(dir);
                    FileSystem fileSystem = FileSystem.newInstance(new Path(core.getDataDir()).toUri(), new Configuration());
                    long size = fileSystem.getContentSummary(new Path(core.getDataDir())).getLength();
                    assertEquals(size, dataDirSize);
                } finally {
                    core.getDirectoryFactory().release(dir);
                }
                RefCounted<IndexWriter> iwRef = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
                try {
                    IndexWriter iw = iwRef.get();
                    NRTCachingDirectory directory = (NRTCachingDirectory) iw.getDirectory();
                    BlockDirectory blockDirectory = (BlockDirectory) directory.getDelegate();
                    assertTrue(blockDirectory.isBlockCacheReadEnabled());
                    // see SOLR-6424
                    assertFalse(blockDirectory.isBlockCacheWriteEnabled());
                    Cache cache = blockDirectory.getCache();
                    // we know it's a BlockDirectoryCache, but future proof
                    assertTrue(cache instanceof BlockDirectoryCache);
                    BlockCache blockCache = ((BlockDirectoryCache) cache).getBlockCache();
                    if (lastBlockCache != null) {
                        if (Boolean.getBoolean(SOLR_HDFS_BLOCKCACHE_GLOBAL)) {
                            assertEquals(lastBlockCache, blockCache);
                        } else {
                            assertNotSame(lastBlockCache, blockCache);
                        }
                    }
                    lastBlockCache = blockCache;
                } finally {
                    iwRef.decref();
                }
            }
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) SolrCore(org.apache.solr.core.SolrCore) BlockDirectoryCache(org.apache.solr.store.blockcache.BlockDirectoryCache) ArrayList(java.util.ArrayList) HdfsDirectoryFactory(org.apache.solr.core.HdfsDirectoryFactory) SolrQuery(org.apache.solr.client.solrj.SolrQuery) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) StoppableIndexingThread(org.apache.solr.cloud.StoppableIndexingThread) CoreContainer(org.apache.solr.core.CoreContainer) DirectoryFactory(org.apache.solr.core.DirectoryFactory) FileSystem(org.apache.hadoop.fs.FileSystem) BlockDirectory(org.apache.solr.store.blockcache.BlockDirectory) Directory(org.apache.lucene.store.Directory) NRTCachingDirectory(org.apache.lucene.store.NRTCachingDirectory) Path(org.apache.hadoop.fs.Path) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) IndexWriter(org.apache.lucene.index.IndexWriter) BlockCache(org.apache.solr.store.blockcache.BlockCache) Cache(org.apache.solr.store.blockcache.Cache) BasicDistributedZkTest(org.apache.solr.cloud.BasicDistributedZkTest) Test(org.junit.Test)
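
The block-cache comparison at the end of the loop depends on how the global cache is switched on: when it is enabled every HDFS-backed core shares one BlockCache, otherwise each core owns its own. A minimal sketch of flipping that switch before the cluster starts follows; the solr.hdfs.blockcache.global property name is an assumption about what the SOLR_HDFS_BLOCKCACHE_GLOBAL constant used above resolves to (it matches the documented Solr HDFS setting of that name).

// Sketch only: share one HDFS block cache across all cores in this JVM.
@BeforeClass
public static void enableGlobalBlockCache() {
    System.setProperty("solr.hdfs.blockcache.global", "true");
}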

Example 44 with JettySolrRunner

Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in the lucene-solr project by Apache.

From the class TestTolerantUpdateProcessorCloud, method assertSpinLoopAllJettyAreRunning.

/**
   * HACK: Loops over every Jetty instance in the specified MiniSolrCloudCluster to see if they are running,
   * and sleeps in small increments until they all report that they are, or a max number of iterations is reached
   * 
   * (workaround for SOLR-8862.  Maybe something like this should be promoted into MiniSolrCloudCluster's 
   * start() method? or SolrCloudTestCase's configureCluster?)
   */
public static void assertSpinLoopAllJettyAreRunning(MiniSolrCloudCluster cluster) throws InterruptedException {
    // NOTE: ideally we could use an ExecutorService that tried to open Sockets (with a long timeout)
    // to each of the jetty instances in parallel w/o any sleeping -- but since they pick their ports
    // dynamically and don't report them until/unless the server is up, that won't necessarily do us
    // any good.
    final int numServers = cluster.getJettySolrRunners().size();
    int numRunning = 0;
    for (int i = 5; 0 <= i; i--) {
        numRunning = 0;
        for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
            if (jetty.isRunning()) {
                numRunning++;
            }
        }
        if (numServers == numRunning) {
            return;
        } else if (0 == i) {
            // give up
            break;
        }
        // the more nodes we're waiting on, the longer we should try to sleep (within reason)
        Thread.sleep(Math.min((numServers - numRunning) * 100, 1000));
    }
    assertEquals("giving up waiting for all jetty instances to be running", numServers, numRunning);
}
Also used : JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner)
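
A typical call site for this guard, sketched under the assumption that the test extends SolrCloudTestCase (so configureCluster(...), configset(...), and the static cluster field are available); the node count and configset name below are placeholders.

@BeforeClass
public static void createMiniSolrCloudCluster() throws Exception {
    // placeholder node count and configset; run the spin-loop guard before any test talks to the nodes
    configureCluster(2)
        .addConfig("conf", configset("cloud-minimal"))
        .configure();
    assertSpinLoopAllJettyAreRunning(cluster);
}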

Example 45 with JettySolrRunner

Use of org.apache.solr.client.solrj.embedded.JettySolrRunner in the lucene-solr project by Apache.

From the class RulesTest, method testHostFragmentRuleThrowsExceptionWhenIpDoesNotMatch.

@Test
public void testHostFragmentRuleThrowsExceptionWhenIpDoesNotMatch() throws Exception {
    String rulesColl = "ipRuleColl";
    JettySolrRunner jetty = cluster.getRandomJetty(random());
    String host = jetty.getBaseUrl().getHost();
    String[] ipFragments = host.split("\\.");
    String ip_1 = ipFragments[ipFragments.length - 1];
    String ip_2 = ipFragments[ipFragments.length - 2];
    expectedException.expect(HttpSolrClient.RemoteSolrException.class);
    expectedException.expectMessage(containsString("ip_1"));
    CollectionAdminRequest.createCollectionWithImplicitRouter(rulesColl, "conf", "shard1", 2).setRule("ip_2:" + ip_2, "ip_1:" + ip_1 + "9999").setSnitch("class:ImplicitSnitch").process(cluster.getSolrClient());
}
Also used : HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) JUnitMatchers.containsString(org.junit.matchers.JUnitMatchers.containsString) Test(org.junit.Test)
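
For contrast, here is a sketch of the matching case: the same fluent API, but with fragments taken verbatim from the node's host, so collection creation is expected to succeed instead of throwing. The collection name is a placeholder.

// Sketch: identical rule syntax, but ip_1/ip_2 actually match the chosen node's host fragments.
CollectionAdminRequest.createCollectionWithImplicitRouter("hostFragmentColl", "conf", "shard1", 2)
    .setRule("ip_2:" + ip_2, "ip_1:" + ip_1)
    .setSnitch("class:ImplicitSnitch")
    .process(cluster.getSolrClient());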

Aggregations

JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner): 137
Test (org.junit.Test): 52
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 29
Replica (org.apache.solr.common.cloud.Replica): 28
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 25
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 20
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 20
File (java.io.File): 19
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 19
DocCollection (org.apache.solr.common.cloud.DocCollection): 18
Slice (org.apache.solr.common.cloud.Slice): 18
IOException (java.io.IOException): 15
ArrayList (java.util.ArrayList): 15
Properties (java.util.Properties): 15
SolrClient (org.apache.solr.client.solrj.SolrClient): 15
UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest): 15
QueryResponse (org.apache.solr.client.solrj.response.QueryResponse): 15
CoreContainer (org.apache.solr.core.CoreContainer): 14
BeforeClass (org.junit.BeforeClass): 14
ClusterState (org.apache.solr.common.cloud.ClusterState): 13