
Example 51 with CloudSolrClient

Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache, from the class ShardSplitTest, method splitByRouteFieldTest.

public void splitByRouteFieldTest() throws Exception {
    log.info("Starting testSplitWithRouteField");
    String collectionName = "routeFieldColl";
    int numShards = 4;
    int replicationFactor = 2;
    int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
    HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
    String shard_fld = "shard_s";
    try (CloudSolrClient client = createCloudClient(null)) {
        Map<String, Object> props = Utils.makeMap(REPLICATION_FACTOR, replicationFactor, MAX_SHARDS_PER_NODE, maxShardsPerNode, NUM_SLICES, numShards, "router.field", shard_fld);
        createCollection(collectionInfos, collectionName, props, client);
    }
    List<Integer> list = collectionInfos.get(collectionName);
    checkForCollection(collectionName, list, null);
    waitForRecoveriesToFinish(false);
    String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
    try (HttpSolrClient collectionClient = getHttpSolrClient(url)) {
        ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
        final DocRouter router = clusterState.getCollection(collectionName).getRouter();
        Slice shard1 = clusterState.getSlice(collectionName, SHARD1);
        DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
        final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
        final int[] docCounts = new int[ranges.size()];
        for (int i = 100; i <= 200; i++) {
            // See comment in ShardRoutingTest for hash distribution
            String shardKey = "" + (char) ('a' + (i % 26));
            collectionClient.add(getDoc(id, i, "n_ti", i, shard_fld, shardKey));
            int idx = getHashRangeIdx(router, ranges, shardKey);
            if (idx != -1) {
                docCounts[idx]++;
            }
        }
        for (int i = 0; i < docCounts.length; i++) {
            int docCount = docCounts[i];
            log.info("Shard {} docCount = {}", "shard1_" + i, docCount);
        }
        collectionClient.commit();
        for (int i = 0; i < 3; i++) {
            try {
                splitShard(collectionName, SHARD1, null, null);
                break;
            } catch (HttpSolrClient.RemoteSolrException e) {
                if (e.code() != 500) {
                    throw e;
                }
                log.error("SPLITSHARD failed. " + (i < 2 ? " Retring split" : ""), e);
                if (i == 2) {
                    fail("SPLITSHARD was not successful even after three tries");
                }
            }
        }
        waitForRecoveriesToFinish(collectionName, false);
        assertEquals(docCounts[0], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_0")).getResults().getNumFound());
        assertEquals(docCounts[1], collectionClient.query(new SolrQuery("*:*").setParam("shards", "shard1_1")).getResults().getNumFound());
    }
}
Also used: ClusterState (org.apache.solr.common.cloud.ClusterState), HashMap (java.util.HashMap), SolrQuery (org.apache.solr.client.solrj.SolrQuery), CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient), HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient), Slice (org.apache.solr.common.cloud.Slice), DocRouter (org.apache.solr.common.cloud.DocRouter), List (java.util.List), ArrayList (java.util.ArrayList)
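
The splitShard call above goes through a test helper that wraps the Collections API SPLITSHARD action. For reference, the same operation can be issued directly with SolrJ against a CloudSolrClient; the following is a minimal sketch (not taken from the test), assuming a Solr 6.x/7.x-era SolrJ API and a placeholder ZooKeeper address zkHost:2181.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class SplitShardSketch {
    public static void main(String[] args) throws Exception {
        // zkHost:2181 is a placeholder for the real ZooKeeper ensemble address
        try (CloudSolrClient client = new CloudSolrClient.Builder()
                .withZkHost("zkHost:2181")
                .build()) {
            // Ask the Collections API to split shard1 of routeFieldColl and wait for the response
            CollectionAdminResponse rsp = CollectionAdminRequest.splitShard("routeFieldColl")
                    .setShardName("shard1")
                    .process(client);
            System.out.println("SPLITSHARD status: " + rsp.getStatus());
        }
    }
}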

Example 52 with CloudSolrClient

Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache, from the class TestTlogReplica, method testOnlyLeaderIndexes.

public void testOnlyLeaderIndexes() throws Exception {
    createAndWaitForCollection(1, 0, 2, 0);
    CloudSolrClient cloudClient = cluster.getSolrClient();
    new UpdateRequest().add(sdoc("id", "1")).add(sdoc("id", "2")).add(sdoc("id", "3")).add(sdoc("id", "4")).process(cloudClient, collectionName);
    {
        UpdateHandler updateHandler = getSolrCore(true).get(0).getUpdateHandler();
        RefCounted<IndexWriter> iwRef = updateHandler.getSolrCoreState().getIndexWriter(null);
        assertTrue("IndexWriter at leader must see updates ", iwRef.get().hasUncommittedChanges());
        iwRef.decref();
    }
    for (SolrCore solrCore : getSolrCore(false)) {
        RefCounted<IndexWriter> iwRef = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
        assertFalse("IndexWriter at replicas must not see updates ", iwRef.get().hasUncommittedChanges());
        iwRef.decref();
    }
    checkRTG(1, 4, cluster.getJettySolrRunners());
    new UpdateRequest().deleteById("1").deleteByQuery("id:2").process(cloudClient, collectionName);
    // The DBQ is not processed at replicas, so we can still get doc2 and the other docs by RTG
    checkRTG(2, 4, getSolrRunner(false));
    new UpdateRequest().commit(cloudClient, collectionName);
    waitForNumDocsInAllActiveReplicas(2);
    // Update log roll over
    for (SolrCore solrCore : getSolrCore(false)) {
        UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
        assertFalse(updateLog.hasUncommittedChanges());
    }
    // UpdateLog copies over old updates
    for (int i = 15; i <= 150; i++) {
        cloudClient.add(collectionName, sdoc("id", String.valueOf(i)));
        if (random().nextInt(100) < 15 && i != 150) {
            cloudClient.commit(collectionName);
        }
    }
    checkRTG(120, 150, cluster.getJettySolrRunners());
    waitForReplicasCatchUp(20);
}
Also used: UpdateHandler (org.apache.solr.update.UpdateHandler), UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest), SolrIndexWriter (org.apache.solr.update.SolrIndexWriter), IndexWriter (org.apache.lucene.index.IndexWriter), RefCounted (org.apache.solr.util.RefCounted), SolrCore (org.apache.solr.core.SolrCore), UpdateLog (org.apache.solr.update.UpdateLog), CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient)
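
checkRTG and waitForReplicasCatchUp above are test helpers; the real-time get they exercise is available directly through SolrJ as SolrClient.getById, which hits the /get handler and returns the latest copy of a document even before a commit. A minimal sketch, assuming an already-built CloudSolrClient and the tlog collection created by the test (the collection name tlog_collection is a placeholder):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrDocument;

static void realTimeGet(CloudSolrClient client) throws Exception {
    // Returns the uncommitted document if it is still visible to /get, or null if it does not exist
    SolrDocument doc = client.getById("tlog_collection", "2");
    System.out.println(doc == null ? "doc 2 not found" : "doc 2 = " + doc);
}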

Example 53 with CloudSolrClient

Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache, from the class TestTlogReplica, method testBasicLeaderElection.

public void testBasicLeaderElection() throws Exception {
    createAndWaitForCollection(1, 0, 2, 0);
    CloudSolrClient cloudClient = cluster.getSolrClient();
    new UpdateRequest().deleteByQuery("*:*").commit(cluster.getSolrClient(), collectionName);
    new UpdateRequest().add(sdoc("id", "1")).add(sdoc("id", "2")).process(cloudClient, collectionName);
    JettySolrRunner oldLeaderJetty = getSolrRunner(true).get(0);
    ChaosMonkey.kill(oldLeaderJetty);
    waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
    new UpdateRequest().add(sdoc("id", "3")).add(sdoc("id", "4")).process(cloudClient, collectionName);
    ChaosMonkey.start(oldLeaderJetty);
    waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
    checkRTG(1, 4, cluster.getJettySolrRunners());
    new UpdateRequest().commit(cloudClient, collectionName);
    waitForNumDocsInAllActiveReplicas(4, 0);
}
Also used: UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest), JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner), CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient)
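
waitForState and activeReplicaCount above come from the test framework. Outside of it, the current shard leader can be looked up from live cluster state; a minimal sketch using the ZkStateReader obtained from an existing CloudSolrClient (the shard name shard1 is an assumption for a single-shard collection):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.Replica;

static void printLeader(CloudSolrClient client, String collection) throws Exception {
    // getLeaderRetry waits (up to its default timeout) for a leader to be registered for the shard
    Replica leader = client.getZkStateReader().getLeaderRetry(collection, "shard1");
    System.out.println("Leader of shard1: " + leader.getName() + " at " + leader.getCoreUrl());
}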

Example 54 with CloudSolrClient

Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache, from the class TestTlogReplica, method testDeleteById.

public void testDeleteById() throws Exception {
    createAndWaitForCollection(1, 0, 2, 0);
    CloudSolrClient cloudClient = cluster.getSolrClient();
    new UpdateRequest().deleteByQuery("*:*").commit(cluster.getSolrClient(), collectionName);
    new UpdateRequest().add(sdoc("id", "1")).commit(cloudClient, collectionName);
    waitForNumDocsInAllActiveReplicas(1);
    new UpdateRequest().deleteById("1").process(cloudClient, collectionName);
    boolean success = false;
    try {
        checkRTG(1, 1, cluster.getJettySolrRunners());
        success = true;
    } catch (AssertionError e) {
        // expected: the document was deleted, so RTG should not find it
    }
    assertFalse("Doc1 was deleted but is still visible via RTG", success);
}
Also used: UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest), CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient)
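
The try/catch around checkRTG asserts that the deleted document is no longer visible to real-time get. The same check can be written with plain SolrJ calls, since getById returns null for a missing id; a minimal sketch, assuming an already-built CloudSolrClient and an existing collection name passed in by the caller:

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrInputDocument;

static void deleteByIdSketch(CloudSolrClient client, String collection) throws Exception {
    SolrInputDocument doc = new SolrInputDocument();
    doc.setField("id", "1");
    client.add(collection, doc);
    client.deleteById(collection, "1");
    // Real-time get sees the uncommitted delete, so the document should already be invisible
    System.out.println(client.getById(collection, "1") == null ? "doc 1 deleted" : "doc 1 still visible");
}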

Example 55 with CloudSolrClient

Use of org.apache.solr.client.solrj.impl.CloudSolrClient in project lucene-solr by apache, from the class TestSolrCloudWithKerberosAlt, method testCollectionCreateSearchDelete.

protected void testCollectionCreateSearchDelete() throws Exception {
    String collectionName = "testkerberoscollection";
    MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), JettyConfig.builder().setContext("/solr").build());
    CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
    cloudSolrClient.setDefaultCollection(collectionName);
    try {
        assertNotNull(miniCluster.getZkServer());
        List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
        assertEquals(NUM_SERVERS, jettys.size());
        for (JettySolrRunner jetty : jettys) {
            assertTrue(jetty.isRunning());
        }
        // create collection
        String configName = "solrCloudCollectionConfig";
        miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1/conf"), configName);
        CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR);
        Properties properties = new Properties();
        properties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
        properties.put("solr.tests.maxBufferedDocs", "100000");
        properties.put("solr.tests.ramBufferSizeMB", "100");
        // use non-test classes so RandomizedRunner isn't necessary
        properties.put(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
        properties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
        properties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
        createRequest.setProperties(properties);
        createRequest.process(cloudSolrClient);
        try (SolrZkClient zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
            ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
            zkStateReader.createClusterStateWatchersAndUpdate();
            AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
            // modify/query collection
            SolrInputDocument doc = new SolrInputDocument();
            doc.setField("id", "1");
            cloudSolrClient.add(doc);
            cloudSolrClient.commit();
            SolrQuery query = new SolrQuery();
            query.setQuery("*:*");
            QueryResponse rsp = cloudSolrClient.query(query);
            assertEquals(1, rsp.getResults().getNumFound());
            // delete the collection we created earlier
            CollectionAdminRequest.deleteCollection(collectionName).process(cloudSolrClient);
            AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);
        }
    } finally {
        cloudSolrClient.close();
        miniCluster.shutdown();
    }
}
Also used: TieredMergePolicyFactory (org.apache.solr.index.TieredMergePolicyFactory), JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner), CollectionAdminRequest (org.apache.solr.client.solrj.request.CollectionAdminRequest), Properties (java.util.Properties), SolrZkClient (org.apache.solr.common.cloud.SolrZkClient), SolrQuery (org.apache.solr.client.solrj.SolrQuery), CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient), ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), SolrInputDocument (org.apache.solr.common.SolrInputDocument), QueryResponse (org.apache.solr.client.solrj.response.QueryResponse)
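
Stripped of the Kerberos and MiniSolrCloudCluster scaffolding, the create/index/query/delete cycle in this test reduces to a handful of SolrJ calls. A minimal sketch, assuming a Solr 7.x-era SolrJ API, a placeholder ZooKeeper address zkHost:2181, and a config set named _default already present in ZooKeeper:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;

public class CollectionLifecycleSketch {
    public static void main(String[] args) throws Exception {
        // zkHost:2181 is a placeholder for the real ZooKeeper ensemble address
        try (CloudSolrClient client = new CloudSolrClient.Builder()
                .withZkHost("zkHost:2181")
                .build()) {
            // Create a one-shard, one-replica collection from an existing config set
            CollectionAdminRequest.createCollection("demo", "_default", 1, 1).process(client);

            // Index a single document and make it visible
            SolrInputDocument doc = new SolrInputDocument();
            doc.setField("id", "1");
            client.add("demo", doc);
            client.commit("demo");

            // Query it back
            QueryResponse rsp = client.query("demo", new SolrQuery("*:*"));
            System.out.println("numFound = " + rsp.getResults().getNumFound());

            // Delete the collection we created
            CollectionAdminRequest.deleteCollection("demo").process(client);
        }
    }
}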

Aggregations

CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 140
Test (org.junit.Test): 52
ArrayList (java.util.ArrayList): 40
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 30
HashMap (java.util.HashMap): 26
QueryResponse (org.apache.solr.client.solrj.response.QueryResponse): 25
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 25
CollectionAdminRequest (org.apache.solr.client.solrj.request.CollectionAdminRequest): 24
Slice (org.apache.solr.common.cloud.Slice): 24
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner): 22
List (java.util.List): 21
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 21
Map (java.util.Map): 20
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 20
QueryRequest (org.apache.solr.client.solrj.request.QueryRequest): 19
NamedList (org.apache.solr.common.util.NamedList): 18
UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest): 17
Replica (org.apache.solr.common.cloud.Replica): 17
SolrRequest (org.apache.solr.client.solrj.SolrRequest): 15
DocCollection (org.apache.solr.common.cloud.DocCollection): 15