
Example 51 with Slice

Use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.

From the class OverseerTest, method testReplay.

@Test
public void testReplay() throws Exception {
    String zkDir = createTempDir().toFile().getAbsolutePath() + File.separator + "zookeeper/server1/data";
    ZkTestServer server = new ZkTestServer(zkDir);
    SolrZkClient zkClient = null;
    SolrZkClient overseerClient = null;
    ZkStateReader reader = null;
    try {
        server.run();
        zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
        AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
        AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
        ZkController.createClusterZkNodes(zkClient);
        reader = new ZkStateReader(zkClient);
        reader.createClusterStateWatchersAndUpdate();
        // Prepopulate the work queue with a few items to emulate a previous
        // Overseer that died before persisting state.
        DistributedQueue queue = Overseer.getInternalWorkQueue(zkClient, new Overseer.Stats());
        ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
                ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr", ZkStateReader.NODE_NAME_PROP, "node1",
                ZkStateReader.SHARD_ID_PROP, "s1", ZkStateReader.COLLECTION_PROP, COLLECTION,
                ZkStateReader.CORE_NAME_PROP, "core1", ZkStateReader.ROLES_PROP, "",
                ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
        queue.offer(Utils.toJSON(m));
        m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
                ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr", ZkStateReader.NODE_NAME_PROP, "node1",
                ZkStateReader.SHARD_ID_PROP, "s1", ZkStateReader.COLLECTION_PROP, COLLECTION,
                ZkStateReader.CORE_NAME_PROP, "core2", ZkStateReader.ROLES_PROP, "",
                ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
        queue.offer(Utils.toJSON(m));
        overseerClient = electNewOverseer(server.getZkAddress());
        // Submit a third update to the proper state-update queue.
        queue = Overseer.getStateUpdateQueue(zkClient);
        m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
                ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr", ZkStateReader.NODE_NAME_PROP, "node1",
                ZkStateReader.SHARD_ID_PROP, "s1", ZkStateReader.COLLECTION_PROP, COLLECTION,
                ZkStateReader.CORE_NAME_PROP, "core3", ZkStateReader.ROLES_PROP, "",
                ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
        queue.offer(Utils.toJSON(m));
        // Poll until the Overseer has replayed all three queued state updates.
        for (int i = 0; i < 100; i++) {
            Slice s = reader.getClusterState().getSlice(COLLECTION, "s1");
            if (s != null && s.getReplicasMap().size() == 3)
                break;
            Thread.sleep(100);
        }
        assertNotNull(reader.getClusterState().getSlice(COLLECTION, "s1"));
        assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "s1").getReplicasMap().size());
    } finally {
        close(overseerClient);
        close(zkClient);
        close(reader);
        server.shutdown();
    }
}
Also used : ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) Slice(org.apache.solr.common.cloud.Slice) ZkNodeProps(org.apache.solr.common.cloud.ZkNodeProps) SolrZkClient(org.apache.solr.common.cloud.SolrZkClient) Test(org.junit.Test)
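
The bounded poll at the end of testReplay is a recurring idiom in these suites. Here is a minimal sketch of it as a reusable helper, assuming the same ZkStateReader-based access to cluster state; the method name and bounds are illustrative, not part of the source:

private static Slice waitForReplicaCount(ZkStateReader reader, String collection, String sliceName, int expected) throws InterruptedException {
    for (int i = 0; i < 100; i++) {
        // The Overseer applies queued state updates asynchronously, so re-read
        // the cluster state on every iteration instead of caching the Slice.
        Slice s = reader.getClusterState().getSlice(collection, sliceName);
        if (s != null && s.getReplicasMap().size() == expected) {
            return s;
        }
        Thread.sleep(100);
    }
    // null signals a timeout; callers can assertNotNull on the result.
    return null;
}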

Example 52 with Slice

Use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.

From the class SSLMigrationTest, method getReplicas.

private List<Replica> getReplicas() {
    List<Replica> replicas = new ArrayList<>();
    DocCollection collection = this.cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION);
    for (Slice slice : collection.getSlices()) {
        replicas.addAll(slice.getReplicas());
    }
    return replicas;
}
Also used : Slice(org.apache.solr.common.cloud.Slice) ArrayList(java.util.ArrayList) DocCollection(org.apache.solr.common.cloud.DocCollection) Replica(org.apache.solr.common.cloud.Replica)
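
A closely related traversal, sketched here for contrast, collects only the shard leaders. Slice.getLeader() can return null while a leader election is in progress, hence the guard; the helper name is illustrative:

private List<Replica> getLeaders(DocCollection collection) {
    List<Replica> leaders = new ArrayList<>();
    for (Slice slice : collection.getSlices()) {
        // getLeader() may return null during a leader election.
        Replica leader = slice.getLeader();
        if (leader != null) {
            leaders.add(leader);
        }
    }
    return leaders;
}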

Example 53 with Slice

Use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.

From the class HdfsCollectionsAPIDistributedZkTest, method moveReplicaTest.

@Test
public void moveReplicaTest() throws Exception {
    cluster.waitForAllNodes(5000);
    String coll = "movereplicatest_coll";
    CloudSolrClient cloudClient = cluster.getSolrClient();
    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf", 2, 2);
    create.setMaxShardsPerNode(2);
    cloudClient.request(create);
    for (int i = 0; i < 10; i++) {
        cloudClient.add(coll, sdoc("id", String.valueOf(i)));
        cloudClient.commit(coll);
    }
    List<Slice> slices = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlices());
    Collections.shuffle(slices, random());
    // Pick a random shard, then remember one of its non-leader replicas.
    Slice slice = null;
    Replica replica = null;
    for (Slice s : slices) {
        slice = s;
        for (Replica r : s.getReplicas()) {
            if (s.getLeader() != r) {
                replica = r;
            }
        }
    }
    String dataDir = getDataDir(replica);
    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
    ArrayList<String> l = new ArrayList<>(liveNodes);
    Collections.shuffle(l, random());
    String targetNode = null;
    // Iterate the shuffled list so the target node is chosen at random;
    // iterating the unshuffled set would make the shuffle dead code.
    for (String node : l) {
        if (!replica.getNodeName().equals(node)) {
            targetNode = node;
            break;
        }
    }
    assertNotNull(targetNode);
    CollectionAdminRequest.MoveReplica moveReplica = new CollectionAdminRequest.MoveReplica(coll, replica.getName(), targetNode);
    moveReplica.process(cloudClient);
    checkNumOfCores(cloudClient, replica.getNodeName(), 0);
    checkNumOfCores(cloudClient, targetNode, 2);
    waitForState("Wait for recovery finish failed", coll, clusterShape(2, 2));
    slice = cloudClient.getZkStateReader().getClusterState().getCollection(coll).getSlice(slice.getName());
    boolean found = false;
    for (Replica newReplica : slice.getReplicas()) {
        if (getDataDir(newReplica).equals(dataDir)) {
            found = true;
        }
    }
    assertTrue(found);
    // data dir is reused so replication will be skipped
    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
        SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
        List<String> registryNames = manager.registryNames().stream().filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
        for (String registry : registryNames) {
            Map<String, Metric> metrics = manager.registry(registry).getMetrics();
            Counter counter = (Counter) metrics.get("REPLICATION./replication.requests");
            if (counter != null) {
                assertEquals(0, counter.getCount());
            }
        }
    }
}
Also used : Nightly(com.carrotsearch.randomizedtesting.annotations.Nightly) BadHdfsThreadsFilter(org.apache.solr.util.BadHdfsThreadsFilter) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) BeforeClass(org.junit.BeforeClass) Slow(org.apache.lucene.util.LuceneTestCase.Slow) CoreAdminResponse(org.apache.solr.client.solrj.response.CoreAdminResponse) ArrayList(java.util.ArrayList) SolrServerException(org.apache.solr.client.solrj.SolrServerException) Map(java.util.Map) ThreadLeakFilters(com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters) Counter(com.codahale.metrics.Counter) JettySolrRunner(org.apache.solr.client.solrj.embedded.JettySolrRunner) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) CoreStatus(org.apache.solr.client.solrj.request.CoreStatus) AfterClass(org.junit.AfterClass) Slice(org.apache.solr.common.cloud.Slice) Set(java.util.Set) Metric(com.codahale.metrics.Metric) IOException(java.io.IOException) Test(org.junit.Test) Collectors(java.util.stream.Collectors) Replica(org.apache.solr.common.cloud.Replica) List(java.util.List) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) SolrMetricManager(org.apache.solr.metrics.SolrMetricManager) ZkConfigManager(org.apache.solr.common.cloud.ZkConfigManager) Collections(java.util.Collections) CoreAdminRequest(org.apache.solr.client.solrj.request.CoreAdminRequest) CollectionAdminRequest(org.apache.solr.client.solrj.request.CollectionAdminRequest) CollectionsAPIDistributedZkTest(org.apache.solr.cloud.CollectionsAPIDistributedZkTest)
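
The shuffle-and-scan above selects a random shard and one of its non-leader replicas. The selection step can be isolated into a small helper; a sketch with a hypothetical name, returning null when the slice has no non-leader replica (for example at replicationFactor=1):

private static Replica pickNonLeader(Slice slice) {
    Replica leader = slice.getLeader();
    for (Replica r : slice.getReplicas()) {
        // Replicas in the cluster state are shared instances, so a reference
        // comparison against the leader is sufficient here.
        if (r != leader) {
            return r;
        }
    }
    return null;
}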

Example 54 with Slice

Use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.

From the class TestHdfsBackupRestoreCore, method test.

@Test
public void test() throws Exception {
    CloudSolrClient solrClient = cluster.getSolrClient();
    String collectionName = "HdfsBackupRestore";
    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);
    create.process(solrClient);
    int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed);
    DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName);
    assertEquals(1, collectionState.getActiveSlices().size());
    Slice shard = collectionState.getActiveSlices().iterator().next();
    assertEquals(1, shard.getReplicas().size());
    Replica replica = shard.getReplicas().iterator().next();
    String replicaBaseUrl = replica.getStr(BASE_URL_PROP);
    String coreName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
    String backupName = TestUtil.randomSimpleString(random(), 1, 5);
    boolean testViaReplicationHandler = random().nextBoolean();
    String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString();
    try (SolrClient masterClient = getHttpSolrClient(replicaBaseUrl)) {
        // Create a backup.
        if (testViaReplicationHandler) {
            log.info("Running Backup via replication handler");
            BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName);
            CheckBackupStatus checkBackupStatus = new CheckBackupStatus((HttpSolrClient) masterClient, coreName, null);
            while (!checkBackupStatus.success) {
                checkBackupStatus.fetchStatus();
                Thread.sleep(1000);
            }
        } else {
            log.info("Running Backup via core admin api");
            Map<String, String> params = new HashMap<>();
            params.put("name", backupName);
            params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
            BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params);
        }
        int numRestoreTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1;
        for (int attempts = 0; attempts < numRestoreTests; attempts++) {
            //Modify existing index before we call restore.
            if (nDocs > 0) {
                //Delete a few docs
                int numDeletes = TestUtil.nextInt(random(), 1, nDocs);
                for (int i = 0; i < numDeletes; i++) {
                    masterClient.deleteByQuery(collectionName, "id:" + i);
                }
                masterClient.commit(collectionName);
                //Add a few more
                int moreAdds = TestUtil.nextInt(random(), 1, 100);
                for (int i = 0; i < moreAdds; i++) {
                    SolrInputDocument doc = new SolrInputDocument();
                    doc.addField("id", i + nDocs);
                    doc.addField("name", "name = " + (i + nDocs));
                    masterClient.add(collectionName, doc);
                }
                // Purposely skip the commit some of the time, so the restore
                // runs while some docs are still uncommitted.
                if (usually()) {
                    masterClient.commit(collectionName);
                }
            }
            // Snapshooter prefixes "snapshot." to the backup name.
            if (testViaReplicationHandler) {
                log.info("Running Restore via replication handler");
                BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_RESTORE, "hdfs", backupName);
                while (!TestRestoreCore.fetchRestoreStatus(baseUrl, coreName)) {
                    Thread.sleep(1000);
                }
            } else {
                log.info("Running Restore via core admin api");
                Map<String, String> params = new HashMap<>();
                params.put("name", "snapshot." + backupName);
                params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs");
                BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params);
            }
            //See if restore was successful by checking if all the docs are present again
            BackupRestoreUtils.verifyDocs(nDocs, masterClient, coreName);
            // Verify the permissions for the backup folder.
            FileStatus status = fs.getFileStatus(new org.apache.hadoop.fs.Path("/backup/snapshot." + backupName));
            FsPermission perm = status.getPermission();
            assertEquals(FsAction.ALL, perm.getUserAction());
            assertEquals(FsAction.ALL, perm.getGroupAction());
            assertEquals(FsAction.ALL, perm.getOtherAction());
        }
    }
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) CollectionAdminRequest(org.apache.solr.client.solrj.request.CollectionAdminRequest) Replica(org.apache.solr.common.cloud.Replica) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) SolrInputDocument(org.apache.solr.common.SolrInputDocument) CloudSolrClient(org.apache.solr.client.solrj.impl.CloudSolrClient) SolrClient(org.apache.solr.client.solrj.SolrClient) HttpSolrClient(org.apache.solr.client.solrj.impl.HttpSolrClient) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
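
Instead of reading BASE_URL_PROP and CORE_NAME_PROP separately and joining them by hand, a replica's core URL can be derived with ZkCoreNodeProps. A small sketch, assuming the replica has registered both properties in ZooKeeper; the variable names mirror the test above:

    // Replica extends ZkNodeProps, so it can be wrapped directly.
    Slice shard = collectionState.getActiveSlices().iterator().next();
    Replica replica = shard.getReplicas().iterator().next();
    String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();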

Example 55 with Slice

Use of org.apache.solr.common.cloud.Slice in project lucene-solr by apache.

From the class TestConfigReload, method checkConfReload.

private void checkConfReload(SolrZkClient client, String resPath, String name, String uri) throws Exception {
    Stat stat = new Stat();
    byte[] data = null;
    try {
        data = client.getData(resPath, null, stat, true);
    } catch (KeeperException.NoNodeException e) {
        data = "{}".getBytes(StandardCharsets.UTF_8);
        log.info("creating_node {}", resPath);
        client.create(resPath, data, CreateMode.PERSISTENT, true);
    }
    long startTime = System.nanoTime();
    Stat newStat = client.setData(resPath, data, true);
    client.setData("/configs/conf1", new byte[] { 1 }, true);
    assertTrue(newStat.getVersion() > stat.getVersion());
    log.info("new_version " + newStat.getVersion());
    Integer newVersion = newStat.getVersion();
    long maxTimeoutSeconds = 20;
    DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
    List<String> urls = new ArrayList<>();
    for (Slice slice : coll.getSlices()) {
        for (Replica replica : slice.getReplicas()) {
            urls.add(replica.get(ZkStateReader.BASE_URL_PROP) + "/" + replica.get(ZkStateReader.CORE_NAME_PROP));
        }
    }
    HashSet<String> succeeded = new HashSet<>();
    while (TimeUnit.SECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) < maxTimeoutSeconds) {
        Thread.sleep(50);
        for (String url : urls) {
            Map respMap = getAsMap(url + uri + "?wt=json");
            if (String.valueOf(newVersion).equals(String.valueOf(getObjectByPath(respMap, true, asList(name, "znodeVersion"))))) {
                succeeded.add(url);
            }
        }
        if (succeeded.size() == urls.size())
            break;
        succeeded.clear();
    }
    assertEquals(StrUtils.formatString("tried these servers {0} succeeded only in {1} ", urls, succeeded), urls.size(), succeeded.size());
}
Also used : ArrayList(java.util.ArrayList) Replica(org.apache.solr.common.cloud.Replica) Stat(org.apache.zookeeper.data.Stat) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) Map(java.util.Map) KeeperException(org.apache.zookeeper.KeeperException) HashSet(java.util.HashSet)
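
The nested URL-collection loop can also be written with the Java 8 stream API; a sketch, assuming an import of java.util.stream.Collectors and that every replica carries both properties (they are set when a core registers in ZooKeeper):

    // Flatten every slice's replicas into one stream of core URLs.
    List<String> urls = coll.getSlices().stream()
            .flatMap(slice -> slice.getReplicas().stream())
            .map(r -> r.get(ZkStateReader.BASE_URL_PROP) + "/" + r.get(ZkStateReader.CORE_NAME_PROP))
            .collect(Collectors.toList());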

Aggregations

Slice (org.apache.solr.common.cloud.Slice): 219 usages
Replica (org.apache.solr.common.cloud.Replica): 141 usages
DocCollection (org.apache.solr.common.cloud.DocCollection): 121 usages
ClusterState (org.apache.solr.common.cloud.ClusterState): 80 usages
ArrayList (java.util.ArrayList): 79 usages
HashMap (java.util.HashMap): 66 usages
SolrException (org.apache.solr.common.SolrException): 49 usages
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader): 49 usages
Map (java.util.Map): 45 usages
Test (org.junit.Test): 37 usages
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient): 28 usages
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient): 25 usages
HashSet (java.util.HashSet): 24 usages
SolrQuery (org.apache.solr.client.solrj.SolrQuery): 24 usages
IOException (java.io.IOException): 23 usages
NamedList (org.apache.solr.common.util.NamedList): 23 usages
List (java.util.List): 22 usages
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 22 usages
DocRouter (org.apache.solr.common.cloud.DocRouter): 20 usages
ZkCoreNodeProps (org.apache.solr.common.cloud.ZkCoreNodeProps): 20 usages