Example 1 with Create

Use of org.apache.solr.client.solrj.request.CollectionAdminRequest.Create in project lucene-solr by apache.

Source: class MultiThreadedOCPTest, method testTaskExclusivity.

private void testTaskExclusivity() throws Exception {
    // Queue handle used to inject mock tasks directly into the Overseer's collection work queue.
    DistributedQueue distributedQueue = new DistributedQueue(cloudClient.getZkStateReader().getZkClient(), "/overseer/collection-queue-work", new Overseer.Stats());
    try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
        Create createCollectionRequest = CollectionAdminRequest.createCollection("ocptest_shardsplit", "conf1", 4, 1);
        // Create the collection asynchronously under request id "1000".
        createCollectionRequest.processAsync("1000", client);
        // Enqueue two mock tasks for the same collection; task exclusivity means they must never run concurrently.
        distributedQueue.offer(Utils.toJSON(Utils.makeMap("collection", "ocptest_shardsplit", QUEUE_OPERATION, MOCK_COLL_TASK.toLower(), ASYNC, "1001", "sleep", "100")));
        distributedQueue.offer(Utils.toJSON(Utils.makeMap("collection", "ocptest_shardsplit", QUEUE_OPERATION, MOCK_COLL_TASK.toLower(), ASYNC, "1002", "sleep", "100")));
        int iterations = 0;
        while (true) {
            int runningTasks = 0;
            int completedTasks = 0;
            for (int i = 1001; i <= 1002; i++) {
                final RequestStatusState state = getRequestState(i, client);
                if (state == RequestStatusState.RUNNING) {
                    runningTasks++;
                } else if (state == RequestStatusState.COMPLETED) {
                    completedTasks++;
                }
                assertNotSame("We have a failed task", RequestStatusState.FAILED, state);
            }
            // TODO: REQUESTSTATUS might come back with more than 1 running tasks over multiple calls.
            // The only way to fix this is to support checking of multiple requestids in a single REQUESTSTATUS task.
            assertTrue("Mutual exclusion failed. Found more than one task running for the same collection", runningTasks < 2);
            if (completedTasks == 2 || iterations++ > REQUEST_STATUS_TIMEOUT)
                break;
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
        for (int i = 1001; i <= 1002; i++) {
            final RequestStatusState state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
            assertSame("Task " + i + " did not complete, final state: " + state, RequestStatusState.COMPLETED, state);
        }
    }
}
Also used: SolrClient (org.apache.solr.client.solrj.SolrClient), HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient), Create (org.apache.solr.client.solrj.request.CollectionAdminRequest.Create), RequestStatusState (org.apache.solr.client.solrj.response.RequestStatusState)
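
The pattern above reduces to three steps: build a Create request, submit it with processAsync under a caller-chosen async id, and poll the REQUESTSTATUS API until the task leaves the SUBMITTED/RUNNING states. A minimal, self-contained sketch of that pattern outside the test harness is shown below; the base URL, collection name, configset name and async id are illustrative assumptions, not values taken from the project.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
import org.apache.solr.client.solrj.response.RequestStatusState;

public class AsyncCreateSketch {

    public static void main(String[] args) throws Exception {
        // Assumed base URL of a SolrCloud node; adjust to your cluster.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
            // Build the CREATE request: 4 shards, 1 replica each, using an existing configset "conf1".
            Create create = CollectionAdminRequest.createCollection("example_collection", "conf1", 4, 1);
            // Submit asynchronously under a caller-chosen async id instead of blocking.
            create.processAsync("create-1000", client);

            // Poll the REQUESTSTATUS API until the async task leaves SUBMITTED/RUNNING.
            RequestStatusState state;
            do {
                Thread.sleep(1000);
                state = CollectionAdminRequest.requestStatus("create-1000")
                        .process(client)
                        .getRequestStatus();
            } while (state == RequestStatusState.SUBMITTED || state == RequestStatusState.RUNNING);

            System.out.println("CREATE finished with state: " + state);
        }
    }
}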

Example 2 with Create

Use of org.apache.solr.client.solrj.request.CollectionAdminRequest.Create in project lucene-solr by apache.

Source: class SharedFSAutoReplicaFailoverTest, method testBasics.

// very slow tests, especially since jetty is started and stopped serially
private void testBasics() throws Exception {
    String collection1 = "solrj_collection";
    Create createCollectionRequest = CollectionAdminRequest.createCollection(collection1, "conf1", 2, 2).setMaxShardsPerNode(2).setRouterField("myOwnField").setAutoAddReplicas(true);
    CollectionAdminResponse response = createCollectionRequest.process(cloudClient);
    assertEquals(0, response.getStatus());
    assertTrue(response.isSuccess());
    waitForRecoveriesToFinish(collection1, false);
    String collection2 = "solrj_collection2";
    createCollectionRequest = CollectionAdminRequest.createCollection(collection2, "conf1", 2, 2).setMaxShardsPerNode(2).setRouterField("myOwnField").setAutoAddReplicas(false);
    CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());
    assertEquals(0, response2.getStatus());
    assertTrue(response2.isSuccess());
    waitForRecoveriesToFinish(collection2, false);
    String collection3 = "solrj_collection3";
    createCollectionRequest = CollectionAdminRequest.createCollection(collection3, "conf1", 5, 1).setMaxShardsPerNode(1).setRouterField("myOwnField").setAutoAddReplicas(true);
    CollectionAdminResponse response3 = createCollectionRequest.process(getCommonCloudSolrClient());
    assertEquals(0, response3.getStatus());
    assertTrue(response3.isSuccess());
    waitForRecoveriesToFinish(collection3, false);
    // collection4 has only 1 replica per shard
    String collection4 = "solrj_collection4";
    createCollectionRequest = CollectionAdminRequest.createCollection(collection4, "conf1", 5, 1).setMaxShardsPerNode(5).setRouterField("text").setAutoAddReplicas(true);
    CollectionAdminResponse response4 = createCollectionRequest.process(getCommonCloudSolrClient());
    assertEquals(0, response4.getStatus());
    assertTrue(response4.isSuccess());
    waitForRecoveriesToFinish(collection4, false);
    // all collections
    String[] collections = { collection1, collection2, collection3, collection4 };
    // add some documents to collection4
    final int numDocs = 100;
    // indexed but not committed
    addDocs(collection4, numDocs, false);
    // no result because not committed yet
    queryAndAssertResultSize(collection4, 0, 10000);
    assertUlogDir(collections);
    ChaosMonkey.stop(jettys.get(1));
    ChaosMonkey.stop(jettys.get(2));
    Thread.sleep(5000);
    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
    assertSliceAndReplicaCount(collection1);
    assertEquals(4, ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
    assertTrue(ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2) < 4);
    // collection3 has maxShardsPerNode=1; there are 4 standard jetties plus one control jetty, and 2 nodes have been stopped
    ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection3, 3, 30000);
    // collection4 has 5 shards, replicationFactor=1 and maxShardsPerNode=5
    ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection4, 5, 30000);
    // commit so that all docs are visible to queries after failover
    cloudClient.commit();
    assertSingleReplicationAndShardSize(collection4, 5);
    queryAndAssertResultSize(collection4, numDocs, 10000);
    // collection1 should still be at 4
    assertEquals(4, ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
    // and collection2 less than 4
    assertTrue(ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection2) < 4);
    assertUlogDir(collections);
    ChaosMonkey.stop(jettys);
    ChaosMonkey.stop(controlJetty);
    assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
    ChaosMonkey.start(jettys);
    ChaosMonkey.start(controlJetty);
    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
    assertSliceAndReplicaCount(collection1);
    assertSingleReplicationAndShardSize(collection3, 5);
    // all docs should be queried
    assertSingleReplicationAndShardSize(collection4, 5);
    queryAndAssertResultSize(collection4, numDocs, 10000);
    assertUlogDir(collections);
    int jettyIndex = random().nextInt(jettys.size());
    ChaosMonkey.stop(jettys.get(jettyIndex));
    ChaosMonkey.start(jettys.get(jettyIndex));
    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 60000));
    assertSliceAndReplicaCount(collection1);
    assertUlogDir(collections);
    assertSingleReplicationAndShardSize(collection3, 5);
    ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection3, 5, 30000);
    assertSingleReplicationAndShardSize(collection4, 5);
    ClusterStateUtil.waitForLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection4, 5, 30000);
    // disable autoAddReplicas cluster-wide via the CLUSTERPROP collections API
    Map m = makeMap("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(), "name", ZkStateReader.AUTO_ADD_REPLICAS, "val", "false");
    SolrRequest request = new QueryRequest(new MapSolrParams(m));
    request.setPath("/admin/collections");
    cloudClient.request(request);
    int currentCount = ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1);
    ChaosMonkey.stop(jettys.get(3));
    // solr.xml defines workLoopDelay=10s and waitAfterExpiration=10s,
    // hence wait for 30 seconds to be on the safe side.
    Thread.sleep(30000);
    // Ensures that autoAddReplicas has not kicked in.
    assertTrue(currentCount > ClusterStateUtil.getLiveAndActiveReplicaCount(cloudClient.getZkStateReader(), collection1));
    // re-enable autoAddReplicas by unsetting the cluster property (the "val" parameter is omitted)
    m = makeMap("action", CollectionParams.CollectionAction.CLUSTERPROP.toLower(), "name", ZkStateReader.AUTO_ADD_REPLICAS);
    request = new QueryRequest(new MapSolrParams(m));
    request.setPath("/admin/collections");
    cloudClient.request(request);
    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 90000));
    assertSliceAndReplicaCount(collection1);
    assertUlogDir(collections);
    // restart all to test core saved state
    ChaosMonkey.stop(jettys);
    ChaosMonkey.stop(controlJetty);
    assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
    ChaosMonkey.start(jettys);
    ChaosMonkey.start(controlJetty);
    assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), collection1, 120000));
    assertSliceAndReplicaCount(collection1);
    assertUlogDir(collections);
    assertSliceAndReplicaCount(collection1);
    assertSingleReplicationAndShardSize(collection3, 5);
    // all docs should be queried
    assertSingleReplicationAndShardSize(collection4, 5);
    queryAndAssertResultSize(collection4, numDocs, 10000);
}
Also used: MapSolrParams (org.apache.solr.common.params.MapSolrParams), CollectionAdminResponse (org.apache.solr.client.solrj.response.CollectionAdminResponse), QueryRequest (org.apache.solr.client.solrj.request.QueryRequest), Create (org.apache.solr.client.solrj.request.CollectionAdminRequest.Create), SolrRequest (org.apache.solr.client.solrj.SolrRequest), HashMap (java.util.HashMap), Map (java.util.Map), Utils.makeMap (org.apache.solr.common.util.Utils.makeMap)
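
Where a full async workflow is not needed, the same synchronous create-with-autoAddReplicas call and the CLUSTERPROP toggle can be expressed with the typed SolrJ helpers instead of the hand-built MapSolrParams request against /admin/collections. The sketch below is a hedged illustration of that approach: the ZooKeeper address, collection name and configset name are assumptions, and CollectionAdminRequest.setClusterProperty stands in for the raw QueryRequest used in the test.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.cloud.ZkStateReader;

public class AutoAddReplicasSketch {

    public static void main(String[] args) throws Exception {
        // Assumed ZooKeeper address; adjust to your SolrCloud ensemble.
        try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
            // Synchronous create with autoAddReplicas enabled, mirroring solrj_collection above.
            Create create = CollectionAdminRequest.createCollection("example_collection", "conf1", 2, 2)
                    .setMaxShardsPerNode(2)
                    .setAutoAddReplicas(true);
            CollectionAdminResponse rsp = create.process(client);
            if (rsp.getStatus() != 0 || !rsp.isSuccess()) {
                throw new IllegalStateException("Collection creation failed: " + rsp);
            }

            // Disable autoAddReplicas cluster-wide with the typed CLUSTERPROP helper.
            CollectionAdminRequest.setClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, "false").process(client);

            // Passing null should unset the property again, matching the raw request in the
            // test that omits the "val" parameter, so the default behavior is restored.
            CollectionAdminRequest.setClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, null).process(client);
        }
    }
}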

Aggregations

Create (org.apache.solr.client.solrj.request.CollectionAdminRequest.Create): 2 usages
HashMap (java.util.HashMap): 1 usage
Map (java.util.Map): 1 usage
SolrClient (org.apache.solr.client.solrj.SolrClient): 1 usage
SolrRequest (org.apache.solr.client.solrj.SolrRequest): 1 usage
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 1 usage
QueryRequest (org.apache.solr.client.solrj.request.QueryRequest): 1 usage
CollectionAdminResponse (org.apache.solr.client.solrj.response.CollectionAdminResponse): 1 usage
RequestStatusState (org.apache.solr.client.solrj.response.RequestStatusState): 1 usage
MapSolrParams (org.apache.solr.common.params.MapSolrParams): 1 usage
Utils.makeMap (org.apache.solr.common.util.Utils.makeMap): 1 usage