Search in sources:

Example 1 with ImplicitDocRouter

Use of org.apache.solr.common.cloud.ImplicitDocRouter in project lucene-solr by apache.

From the class AbstractCloudBackupRestoreTestCase, method testBackupAndRestore.

private void testBackupAndRestore(String collectionName) throws Exception {
    String backupLocation = getBackupLocation();
    String backupName = "mytestbackup";
    CloudSolrClient client = cluster.getSolrClient();
    DocCollection backupCollection = client.getZkStateReader().getClusterState().getCollection(collectionName);
    Map<String, Integer> origShardToDocCount = getShardToDocCountMap(client, backupCollection);
    assertFalse(origShardToDocCount.isEmpty());
    log.info("Triggering Backup command");
    {
        CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(collectionName, backupName).setLocation(backupLocation).setRepositoryName(getBackupRepoName());
        if (random().nextBoolean()) {
            assertEquals(0, backup.process(client).getStatus());
        } else {
            //async
            assertEquals(RequestStatusState.COMPLETED, backup.processAndWait(client, 30));
        }
    }
    log.info("Triggering Restore command");
    String restoreCollectionName = collectionName + "_restored";
    boolean sameConfig = random().nextBoolean();
    {
        CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName).setLocation(backupLocation).setRepositoryName(getBackupRepoName());
        if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
            // may need to increase maxShardsPerNode (e.g. if a shard was split, we now need more room per node)
            restore.setMaxShardsPerNode((int) Math.ceil((double) backupCollection.getReplicas().size() / cluster.getJettySolrRunners().size()));
        }
        if (rarely()) {
            // Try with createNodeSet configuration
            int nodeSetSize = cluster.getJettySolrRunners().size() / 2;
            List<String> nodeStrs = new ArrayList<>(nodeSetSize);
            Iterator<JettySolrRunner> iter = cluster.getJettySolrRunners().iterator();
            for (int i = 0; i < nodeSetSize; i++) {
                nodeStrs.add(iter.next().getNodeName());
            }
            restore.setCreateNodeSet(String.join(",", nodeStrs));
            restore.setCreateNodeSetShuffle(usually());
            // double the maxShardsPerNode value since we reduced the number of available nodes by half
            if (restore.getMaxShardsPerNode() != null) {
                restore.setMaxShardsPerNode(restore.getMaxShardsPerNode() * 2);
            } else {
                restore.setMaxShardsPerNode(origShardToDocCount.size() * 2);
            }
        }
        Properties props = new Properties();
        props.setProperty("customKey", "customVal");
        restore.setProperties(props);
        if (sameConfig == false) {
            restore.setConfigName("customConfigName");
        }
        if (random().nextBoolean()) {
            assertEquals(0, restore.process(client).getStatus());
        } else {
            //async
            assertEquals(RequestStatusState.COMPLETED, restore.processAndWait(client, 30));
        }
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(restoreCollectionName, cluster.getSolrClient().getZkStateReader(), log.isDebugEnabled(), true, 30);
    }
    // Check that the restored collection has the same per-shard doc counts
    DocCollection restoreCollection = client.getZkStateReader().getClusterState().getCollection(restoreCollectionName);
    assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
    // Test that we reconstituted the hash ranges / doc router: re-indexing the same docs should leave the per-shard counts unchanged.
    if (!(restoreCollection.getRouter() instanceof ImplicitDocRouter) && random().nextBoolean()) {
        indexDocs(restoreCollectionName);
        assertEquals(origShardToDocCount, getShardToDocCountMap(client, restoreCollection));
    }
    assertEquals(backupCollection.getReplicationFactor(), restoreCollection.getReplicationFactor());
    assertEquals(backupCollection.getAutoAddReplicas(), restoreCollection.getAutoAddReplicas());
    assertEquals(backupCollection.getActiveSlices().iterator().next().getReplicas().size(), restoreCollection.getActiveSlices().iterator().next().getReplicas().size());
    assertEquals(sameConfig ? "conf1" : "customConfigName", cluster.getSolrClient().getZkStateReader().readConfigName(restoreCollectionName));
    Map<String, Integer> numReplicasByNodeName = new HashMap<>();
    restoreCollection.getReplicas().forEach(x -> {
        numReplicasByNodeName.put(x.getNodeName(), numReplicasByNodeName.getOrDefault(x.getNodeName(), 0) + 1);
    });
    numReplicasByNodeName.forEach((k, v) -> {
        assertTrue("Node " + k + " has " + v + " replicas. Expected num replicas : " + restoreCollection.getMaxShardsPerNode(), v <= restoreCollection.getMaxShardsPerNode());
    });
// assert added core properties:
// DWS: did via manual inspection.
// TODO Find the applicable core.properties on the file system but how?
}
Also used: ImplicitDocRouter (org.apache.solr.common.cloud.ImplicitDocRouter), HashMap (java.util.HashMap), Properties (java.util.Properties), CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient), Iterator (java.util.Iterator), ArrayList (java.util.ArrayList), List (java.util.List), DocCollection (org.apache.solr.common.cloud.DocCollection)
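
The helper getShardToDocCountMap is referenced but not shown. A minimal sketch of what it could look like, assuming each shard is asked for its local count via a non-distributed query (the body is illustrative, not the project's implementation; it would additionally need the HttpSolrClient, SolrQuery, and TreeMap imports):

private Map<String, Integer> getShardToDocCountMap(CloudSolrClient client, DocCollection docCollection) throws Exception {
    Map<String, Integer> shardToDocCount = new TreeMap<>();
    for (Slice slice : docCollection.getActiveSlices()) {
        // query one replica of each shard with distrib=false so only its local docs are counted
        String coreUrl = slice.getReplicas().iterator().next().getCoreUrl();
        try (HttpSolrClient shardClient = new HttpSolrClient.Builder(coreUrl).build()) {
            SolrQuery q = new SolrQuery("*:*").setRows(0).setParam("distrib", "false");
            shardToDocCount.put(slice.getName(), (int) shardClient.query(q).getResults().getNumFound());
        }
    }
    return shardToDocCount;
}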

Example 2 with ImplicitDocRouter

Use of org.apache.solr.common.cloud.ImplicitDocRouter in project lucene-solr by apache.

From the class AssignTest, method testAssignNode.

@Test
public void testAssignNode() throws Exception {
    String cname = "collection1";
    Map<String, DocCollection> collectionStates = new HashMap<>();
    Map<String, Slice> slices = new HashMap<>();
    Map<String, Replica> replicas = new HashMap<>();
    ZkNodeProps m = new ZkNodeProps(
            Overseer.QUEUE_OPERATION, "state",
            ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString(),
            ZkStateReader.BASE_URL_PROP, "0.0.0.0",
            ZkStateReader.CORE_NAME_PROP, "core1",
            ZkStateReader.ROLES_PROP, null,
            ZkStateReader.NODE_NAME_PROP, "0_0_0_0",
            ZkStateReader.SHARD_ID_PROP, "shard1",
            ZkStateReader.COLLECTION_PROP, cname,
            ZkStateReader.NUM_SHARDS_PROP, "1",
            ZkStateReader.CORE_NODE_NAME_PROP, "core_node1");
    Replica replica = new Replica("core_node1", m.getProperties());
    replicas.put("core_node1", replica);
    Slice slice = new Slice("slice1", replicas, new HashMap<String, Object>(0));
    slices.put("slice1", slice);
    DocRouter router = new ImplicitDocRouter();
    DocCollection docCollection = new DocCollection(cname, slices, new HashMap<String, Object>(0), router);
    collectionStates.put(cname, docCollection);
    Set<String> liveNodes = new HashSet<>();
    ClusterState state = new ClusterState(-1, liveNodes, collectionStates);
    String nodeName = Assign.assignNode(state.getCollection("collection1"));
    assertEquals("core_node2", nodeName);
}
Also used: ClusterState (org.apache.solr.common.cloud.ClusterState), HashMap (java.util.HashMap), ImplicitDocRouter (org.apache.solr.common.cloud.ImplicitDocRouter), ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps), Replica (org.apache.solr.common.cloud.Replica), Slice (org.apache.solr.common.cloud.Slice), DocRouter (org.apache.solr.common.cloud.DocRouter), DocCollection (org.apache.solr.common.cloud.DocCollection), HashSet (java.util.HashSet), Test (org.junit.Test)
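
The assertion relies on nothing here hashing ids: ImplicitDocRouter resolves the target slice from an explicitly named route rather than from the document id. A minimal sketch reusing the router and docCollection built above, assuming the five-argument DocRouter.getTargetSlice(id, doc, route, params, collection) signature plus the SolrInputDocument and ModifiableSolrParams imports:

SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "doc1");
// the explicit route "slice1", not a hash of "doc1", selects the slice
Slice target = router.getTargetSlice("doc1", doc, "slice1", new ModifiableSolrParams(), docCollection);
assertEquals("slice1", target.getName());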

Example 3 with ImplicitDocRouter

Use of org.apache.solr.common.cloud.ImplicitDocRouter in project lucene-solr by apache.

From the class CollectionMutator, method updateSlice.

public static DocCollection updateSlice(String collectionName, DocCollection collection, Slice slice) {
    DocCollection newCollection = null;
    Map<String, Slice> slices;
    if (collection == null) {
        // updateSlice is called on a collection that doesn't exist when a core is publishing itself
        // without explicitly creating the collection. In that case, we assume custom sharding with an "implicit" router.
        slices = new LinkedHashMap<>(1);
        slices.put(slice.getName(), slice);
        Map<String, Object> props = new HashMap<>(1);
        props.put(DocCollection.DOC_ROUTER, Utils.makeMap(NAME, ImplicitDocRouter.NAME));
        newCollection = new DocCollection(collectionName, slices, props, new ImplicitDocRouter());
    } else {
        // make a shallow copy
        slices = new LinkedHashMap<>(collection.getSlicesMap());
        slices.put(slice.getName(), slice);
        newCollection = collection.copyWithSlices(slices);
    }
    return newCollection;
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ImplicitDocRouter (org.apache.solr.common.cloud.ImplicitDocRouter), Slice (org.apache.solr.common.cloud.Slice), DocCollection (org.apache.solr.common.cloud.DocCollection)
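
A hedged usage sketch of the null-collection branch above: publishing a slice for a collection that does not yet exist yields a one-slice DocCollection wired to the implicit router (the collection and shard names below are made up for illustration):

// a slice published by a core with no pre-existing collection state
Slice slice = new Slice("shardA", new HashMap<>(), new HashMap<>());
DocCollection coll = CollectionMutator.updateSlice("adhocColl", null, slice);
assert coll.getRouter() instanceof ImplicitDocRouter;
assert coll.getSlice("shardA") != null;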

Example 4 with ImplicitDocRouter

Use of org.apache.solr.common.cloud.ImplicitDocRouter in project lucene-solr by apache.

From the class CloudSolrClient, method directUpdate.

private NamedList<Object> directUpdate(AbstractUpdateRequest request, String collection) throws SolrServerException {
    UpdateRequest updateRequest = (UpdateRequest) request;
    ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
    ModifiableSolrParams routableParams = new ModifiableSolrParams();
    ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();
    if (params != null) {
        nonRoutableParams.add(params);
        routableParams.add(params);
        for (String param : NON_ROUTABLE_PARAMS) {
            routableParams.remove(param);
        }
    }
    if (collection == null) {
        throw new SolrServerException("No collection param specified on request and no default collection has been set.");
    }
    //Check to see if the collection is an alias.
    collection = stateProvider.getCollectionName(collection);
    DocCollection col = getDocCollection(collection, null);
    DocRouter router = col.getRouter();
    if (router instanceof ImplicitDocRouter) {
        // short circuit as an optimization: with the implicit router, routes cannot be computed from doc ids on the client
        return null;
    }
    //Create the URL map, which is keyed on slice name.
    //The value is a list of URLs for each replica in the slice.
    //The first value in the list is the leader for the slice.
    final Map<String, List<String>> urlMap = buildUrlMap(col);
    final Map<String, LBHttpSolrClient.Req> routes = (urlMap == null ? null : updateRequest.getRoutes(router, col, urlMap, routableParams, this.idField));
    if (routes == null) {
        if (directUpdatesToLeadersOnly && hasInfoToFindLeaders(updateRequest, idField)) {
            // we have info (documents with ids and/or ids to delete) with
            // which to find the leaders, but we could not find (all of) them
            throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "directUpdatesToLeadersOnly==true but could not find leader(s)");
        } else {
            // we could not find a leader or routes yet - use unoptimized general path
            return null;
        }
    }
    final NamedList<Throwable> exceptions = new NamedList<>();
    // +1 for deleteQuery
    final NamedList<NamedList> shardResponses = new NamedList<>(routes.size() + 1);
    long start = System.nanoTime();
    if (parallelUpdates) {
        final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
        for (final Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
            final String url = entry.getKey();
            final LBHttpSolrClient.Req lbRequest = entry.getValue();
            try {
                MDC.put("CloudSolrClient.url", url);
                responseFutures.put(url, threadPool.submit(() -> lbClient.request(lbRequest).getResponse()));
            } finally {
                MDC.remove("CloudSolrClient.url");
            }
        }
        for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
            final String url = entry.getKey();
            final Future<NamedList<?>> responseFuture = entry.getValue();
            try {
                shardResponses.add(url, responseFuture.get());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            } catch (ExecutionException e) {
                exceptions.add(url, e.getCause());
            }
        }
        if (exceptions.size() > 0) {
            Throwable firstException = exceptions.getVal(0);
            if (firstException instanceof SolrException) {
                SolrException e = (SolrException) firstException;
                throw new RouteException(ErrorCode.getErrorCode(e.code()), exceptions, routes);
            } else {
                throw new RouteException(ErrorCode.SERVER_ERROR, exceptions, routes);
            }
        }
    } else {
        for (Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
            String url = entry.getKey();
            LBHttpSolrClient.Req lbRequest = entry.getValue();
            try {
                NamedList<Object> rsp = lbClient.request(lbRequest).getResponse();
                shardResponses.add(url, rsp);
            } catch (Exception e) {
                if (e instanceof SolrException) {
                    throw (SolrException) e;
                } else {
                    throw new SolrServerException(e);
                }
            }
        }
    }
    UpdateRequest nonRoutableRequest = null;
    List<String> deleteQuery = updateRequest.getDeleteQuery();
    if (deleteQuery != null && deleteQuery.size() > 0) {
        UpdateRequest deleteQueryRequest = new UpdateRequest();
        deleteQueryRequest.setDeleteQuery(deleteQuery);
        nonRoutableRequest = deleteQueryRequest;
    }
    Set<String> paramNames = nonRoutableParams.getParameterNames();
    Set<String> intersection = new HashSet<>(paramNames);
    intersection.retainAll(NON_ROUTABLE_PARAMS);
    if (nonRoutableRequest != null || intersection.size() > 0) {
        if (nonRoutableRequest == null) {
            nonRoutableRequest = new UpdateRequest();
        }
        nonRoutableRequest.setParams(nonRoutableParams);
        List<String> urlList = new ArrayList<>();
        urlList.addAll(routes.keySet());
        Collections.shuffle(urlList, rand);
        LBHttpSolrClient.Req req = new LBHttpSolrClient.Req(nonRoutableRequest, urlList);
        try {
            LBHttpSolrClient.Rsp rsp = lbClient.request(req);
            shardResponses.add(urlList.get(0), rsp.getResponse());
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, urlList.get(0), e);
        }
    }
    long end = System.nanoTime();
    RouteResponse rr = condenseResponse(shardResponses, (int) TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS));
    rr.setRouteResponses(shardResponses);
    rr.setRoutes(routes);
    return rr;
}
Also used: ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), SolrServerException (org.apache.solr.client.solrj.SolrServerException), ArrayList (java.util.ArrayList), ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams), DocRouter (org.apache.solr.common.cloud.DocRouter), ImplicitDocRouter (org.apache.solr.common.cloud.ImplicitDocRouter), List (java.util.List), NamedList (org.apache.solr.common.util.NamedList), DocCollection (org.apache.solr.common.cloud.DocCollection), ExecutionException (java.util.concurrent.ExecutionException), SolrException (org.apache.solr.common.SolrException), HashSet (java.util.HashSet), IsUpdateRequest (org.apache.solr.client.solrj.request.IsUpdateRequest), AbstractUpdateRequest (org.apache.solr.client.solrj.request.AbstractUpdateRequest), UpdateRequest (org.apache.solr.client.solrj.request.UpdateRequest), TimeoutException (java.util.concurrent.TimeoutException), NoHttpResponseException (org.apache.http.NoHttpResponseException), SocketException (java.net.SocketException), ConnectTimeoutException (org.apache.http.conn.ConnectTimeoutException), ConnectException (java.net.ConnectException), IOException (java.io.IOException), Future (java.util.concurrent.Future), SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap), Map (java.util.Map)
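
Note the two null returns above: both mean "no client-side routing is possible", not an error, and the caller is expected to fall back to the generic load-balanced path. A hypothetical caller sketch (the fallback method name is illustrative, not the actual private API):

NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, collection);
if (response == null) {
    // implicit router, or routes not resolvable yet: skip the per-shard fan-out
    response = sendUnroutedRequest(request, collection); // illustrative name
}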

Example 5 with ImplicitDocRouter

Use of org.apache.solr.common.cloud.ImplicitDocRouter in project lucene-solr by apache.

From the class RestoreCmd, method call.

@Override
public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
    // TODO maybe we can inherit createCollection's options/code
    String restoreCollectionName = message.getStr(COLLECTION_PROP);
    String backupName = message.getStr(NAME); // name of the backup
    ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
    String asyncId = message.getStr(ASYNC);
    String repo = message.getStr(CoreAdminParams.BACKUP_REPOSITORY);
    Map<String, String> requestMap = new HashMap<>();
    CoreContainer cc = ocmh.overseer.getZkController().getCoreContainer();
    BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
    URI location = repository.createURI(message.getStr(CoreAdminParams.BACKUP_LOCATION));
    URI backupPath = repository.resolve(location, backupName);
    ZkStateReader zkStateReader = ocmh.zkStateReader;
    BackupManager backupMgr = new BackupManager(repository, zkStateReader);
    Properties properties = backupMgr.readBackupProperties(location, backupName);
    String backupCollection = properties.getProperty(BackupManager.COLLECTION_NAME_PROP);
    DocCollection backupCollectionState = backupMgr.readCollectionState(location, backupName, backupCollection);
    // Get the Solr nodes to restore a collection.
    final List<String> nodeList = OverseerCollectionMessageHandler.getLiveOrLiveAndCreateNodeSetList(zkStateReader.getClusterState().getLiveNodes(), message, RANDOM);
    int numShards = backupCollectionState.getActiveSlices().size();
    int numNrtReplicas = getInt(message, NRT_REPLICAS, backupCollectionState.getNumNrtReplicas(), 0);
    if (numNrtReplicas == 0) {
        numNrtReplicas = getInt(message, REPLICATION_FACTOR, backupCollectionState.getReplicationFactor(), 0);
    }
    int numTlogReplicas = getInt(message, TLOG_REPLICAS, backupCollectionState.getNumTlogReplicas(), 0);
    int numPullReplicas = getInt(message, PULL_REPLICAS, backupCollectionState.getNumPullReplicas(), 0);
    int totalReplicasPerShard = numNrtReplicas + numTlogReplicas + numPullReplicas;
    int maxShardsPerNode = message.getInt(MAX_SHARDS_PER_NODE, backupCollectionState.getMaxShardsPerNode());
    int availableNodeCount = nodeList.size();
    if ((numShards * totalReplicasPerShard) > (availableNodeCount * maxShardsPerNode)) {
        throw new SolrException(ErrorCode.BAD_REQUEST, String.format(Locale.ROOT,
                "Solr cloud with available number of nodes:%d is insufficient for"
                + " restoring a collection with %d shards, total replicas per shard %d and maxShardsPerNode %d."
                + " Consider increasing maxShardsPerNode value OR number of available nodes.",
                availableNodeCount, numShards, totalReplicasPerShard, maxShardsPerNode));
    }
    //Upload the configs
    String configName = (String) properties.get(COLL_CONF);
    String restoreConfigName = message.getStr(COLL_CONF, configName);
    if (zkStateReader.getConfigManager().configExists(restoreConfigName)) {
        log.info("Using existing config {}", restoreConfigName);
    //TODO add overwrite option?
    } else {
        log.info("Uploading config {}", restoreConfigName);
        backupMgr.uploadConfigDir(location, backupName, configName, restoreConfigName);
    }
    log.info("Starting restore into collection={} with backup_name={} at location={}", restoreCollectionName, backupName, location);
    //Create core-less collection
    {
        Map<String, Object> propMap = new HashMap<>();
        propMap.put(Overseer.QUEUE_OPERATION, CREATE.toString());
        // mostly true.  Prevents autoCreated=true in the collection state.
        propMap.put("fromApi", "true");
        // inherit settings from input API, defaulting to the backup's setting.  Ex: replicationFactor
        for (String collProp : COLL_PROPS.keySet()) {
            Object val = message.getProperties().getOrDefault(collProp, backupCollectionState.get(collProp));
            if (val != null) {
                propMap.put(collProp, val);
            }
        }
        propMap.put(NAME, restoreCollectionName);
        //no cores
        propMap.put(CREATE_NODE_SET, CREATE_NODE_SET_EMPTY);
        propMap.put(COLL_CONF, restoreConfigName);
        // router.*
        @SuppressWarnings("unchecked") Map<String, Object> routerProps = (Map<String, Object>) backupCollectionState.getProperties().get(DocCollection.DOC_ROUTER);
        for (Map.Entry<String, Object> pair : routerProps.entrySet()) {
            propMap.put(DocCollection.DOC_ROUTER + "." + pair.getKey(), pair.getValue());
        }
        Set<String> sliceNames = backupCollectionState.getActiveSlicesMap().keySet();
        if (backupCollectionState.getRouter() instanceof ImplicitDocRouter) {
            propMap.put(SHARDS_PROP, StrUtils.join(sliceNames, ','));
        } else {
            propMap.put(NUM_SLICES, sliceNames.size());
            // ClusterStateMutator.createCollection detects that "slices" is in fact a slice structure instead of a
            //   list of names, and if so uses this instead of building it.  We clear the replica list.
            Collection<Slice> backupSlices = backupCollectionState.getActiveSlices();
            Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
            for (Slice backupSlice : backupSlices) {
                newSlices.put(backupSlice.getName(), new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties()));
            }
            propMap.put(SHARDS_PROP, newSlices);
        }
        ocmh.commandMap.get(CREATE).call(zkStateReader.getClusterState(), new ZkNodeProps(propMap), new NamedList());
    // note: when createCollection() returns, the collection exists (no race)
    }
    DocCollection restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
    DistributedQueue inQueue = Overseer.getStateUpdateQueue(zkStateReader.getZkClient());
    //Mark all shards in CONSTRUCTION STATE while we restore the data
    {
        //TODO might instead createCollection accept an initial state?  Is there a race?
        Map<String, Object> propMap = new HashMap<>();
        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
        for (Slice shard : restoreCollection.getSlices()) {
            propMap.put(shard.getName(), Slice.State.CONSTRUCTION.toString());
        }
        propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
    }
    // TODO how do we leverage the RULE / SNITCH logic in createCollection?
    ClusterState clusterState = zkStateReader.getClusterState();
    List<String> sliceNames = new ArrayList<>();
    restoreCollection.getSlices().forEach(x -> sliceNames.add(x.getName()));
    Map<ReplicaAssigner.Position, String> positionVsNodes = ocmh.identifyNodes(clusterState, nodeList, message, sliceNames, numNrtReplicas, numTlogReplicas, numPullReplicas);
    //Create one replica per shard and copy backed up data to it
    for (Slice slice : restoreCollection.getSlices()) {
        log.debug("Adding replica for shard={} collection={} ", slice.getName(), restoreCollection);
        HashMap<String, Object> propMap = new HashMap<>();
        propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD);
        propMap.put(COLLECTION_PROP, restoreCollectionName);
        propMap.put(SHARD_ID_PROP, slice.getName());
        if (numNrtReplicas >= 1) {
            propMap.put(REPLICA_TYPE, Replica.Type.NRT.name());
        } else if (numTlogReplicas >= 1) {
            propMap.put(REPLICA_TYPE, Replica.Type.TLOG.name());
        } else {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unexpected number of replicas, replicationFactor, " + Replica.Type.NRT + " or " + Replica.Type.TLOG + " must be greater than 0");
        }
        // Get the first node matching the shard to restore in
        String node;
        for (Map.Entry<ReplicaAssigner.Position, String> pvn : positionVsNodes.entrySet()) {
            ReplicaAssigner.Position position = pvn.getKey();
            if (position.shard.equals(slice.getName())) {
                node = pvn.getValue();
                propMap.put(CoreAdminParams.NODE, node);
                positionVsNodes.remove(position);
                break;
            }
        }
        // add async param
        if (asyncId != null) {
            propMap.put(ASYNC, asyncId);
        }
        ocmh.addPropertyParams(message, propMap);
        ocmh.addReplica(clusterState, new ZkNodeProps(propMap), new NamedList(), null);
    }
    //refresh the location copy of collection state
    restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
    //Copy data from backed up index to each replica
    for (Slice slice : restoreCollection.getSlices()) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.RESTORECORE.toString());
        params.set(NAME, "snapshot." + slice.getName());
        params.set(CoreAdminParams.BACKUP_LOCATION, backupPath.toASCIIString());
        params.set(CoreAdminParams.BACKUP_REPOSITORY, repo);
        ocmh.sliceCmd(clusterState, params, null, slice, shardHandler, asyncId, requestMap);
    }
    ocmh.processResponses(new NamedList(), shardHandler, true, "Could not restore core", asyncId, requestMap);
    //Mark all shards in ACTIVE STATE
    {
        HashMap<String, Object> propMap = new HashMap<>();
        propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
        propMap.put(ZkStateReader.COLLECTION_PROP, restoreCollectionName);
        for (Slice shard : restoreCollection.getSlices()) {
            propMap.put(shard.getName(), Slice.State.ACTIVE.toString());
        }
        inQueue.offer(Utils.toJSON(new ZkNodeProps(propMap)));
    }
    //refresh the location copy of collection state
    restoreCollection = zkStateReader.getClusterState().getCollection(restoreCollectionName);
    if (totalReplicasPerShard > 1) {
        log.info("Adding replicas to restored collection={}", restoreCollection);
        for (Slice slice : restoreCollection.getSlices()) {
            // Add the remaining replicas for each shard, considering its type
            int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
            // We already created either an NRT or a TLOG replica as leader
            if (numNrtReplicas > 0) {
                createdNrtReplicas++;
            } else if (numTlogReplicas > 0) {
                createdTlogReplicas++;
            }
            for (int i = 1; i < totalReplicasPerShard; i++) {
                Replica.Type typeToCreate;
                if (createdNrtReplicas < numNrtReplicas) {
                    createdNrtReplicas++;
                    typeToCreate = Replica.Type.NRT;
                } else if (createdTlogReplicas < numTlogReplicas) {
                    createdTlogReplicas++;
                    typeToCreate = Replica.Type.TLOG;
                } else {
                    createdPullReplicas++;
                    typeToCreate = Replica.Type.PULL;
                    assert createdPullReplicas <= numPullReplicas : "Unexpected number of replicas";
                }
                log.debug("Adding replica for shard={} collection={} of type {} ", slice.getName(), restoreCollection, typeToCreate);
                HashMap<String, Object> propMap = new HashMap<>();
                propMap.put(COLLECTION_PROP, restoreCollectionName);
                propMap.put(SHARD_ID_PROP, slice.getName());
                propMap.put(REPLICA_TYPE, typeToCreate.name());
                // Get the first node matching the shard to restore in
                String node;
                for (Map.Entry<ReplicaAssigner.Position, String> pvn : positionVsNodes.entrySet()) {
                    ReplicaAssigner.Position position = pvn.getKey();
                    if (position.shard.equals(slice.getName())) {
                        node = pvn.getValue();
                        propMap.put(CoreAdminParams.NODE, node);
                        positionVsNodes.remove(position);
                        break;
                    }
                }
                // add async param
                if (asyncId != null) {
                    propMap.put(ASYNC, asyncId);
                }
                ocmh.addPropertyParams(message, propMap);
                ocmh.addReplica(zkStateReader.getClusterState(), new ZkNodeProps(propMap), results, null);
            }
        }
    }
    log.info("Completed restoring collection={} backupName={}", restoreCollection, backupName);
}
Also used: Set (java.util.Set), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps), ArrayList (java.util.ArrayList), Properties (java.util.Properties), URI (java.net.URI), ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams), ZkStateReader (org.apache.solr.common.cloud.ZkStateReader), BackupRepository (org.apache.solr.core.backup.repository.BackupRepository), CoreContainer (org.apache.solr.core.CoreContainer), DocCollection (org.apache.solr.common.cloud.DocCollection), SolrException (org.apache.solr.common.SolrException), ClusterState (org.apache.solr.common.cloud.ClusterState), ImplicitDocRouter (org.apache.solr.common.cloud.ImplicitDocRouter), NamedList (org.apache.solr.common.util.NamedList), ReplicaAssigner (org.apache.solr.cloud.rule.ReplicaAssigner), ShardHandler (org.apache.solr.handler.component.ShardHandler), BackupManager (org.apache.solr.core.backup.BackupManager), Replica (org.apache.solr.common.cloud.Replica), Slice (org.apache.solr.common.cloud.Slice), Collection (java.util.Collection), Map (java.util.Map)
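
For reference, RestoreCmd runs on the Overseer in response to a Collections API RESTORE call; the SolrJ request shown in Example 1 is the usual entry point. A minimal sketch (cloudSolrClient being any connected CloudSolrClient, names made up for illustration):

// the client-side request that ends up in RestoreCmd.call
CollectionAdminRequest.Restore restore =
        CollectionAdminRequest.restoreCollection("mycoll_restored", "mybackup")
                .setLocation("/path/to/backups"); // must match the location used at backup time
assertEquals(0, restore.process(cloudSolrClient).getStatus());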

Aggregations

HashMap (java.util.HashMap): 6
DocCollection (org.apache.solr.common.cloud.DocCollection): 6
ImplicitDocRouter (org.apache.solr.common.cloud.ImplicitDocRouter): 6
ArrayList (java.util.ArrayList): 4
Slice (org.apache.solr.common.cloud.Slice): 4
LinkedHashMap (java.util.LinkedHashMap): 3
Map (java.util.Map): 3
SolrException (org.apache.solr.common.SolrException): 3
DocRouter (org.apache.solr.common.cloud.DocRouter): 3
HashSet (java.util.HashSet): 2
List (java.util.List): 2
Properties (java.util.Properties): 2
ClusterState (org.apache.solr.common.cloud.ClusterState): 2
Replica (org.apache.solr.common.cloud.Replica): 2
ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps): 2
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 2
NamedList (org.apache.solr.common.util.NamedList): 2
IOException (java.io.IOException): 1
ConnectException (java.net.ConnectException): 1
SocketException (java.net.SocketException): 1