Example 6 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class OverseerAutoReplicaFailoverThread method doWork.

private void doWork() {
    // TODO: extract to configurable strategy class ??
    ClusterState clusterState = zkStateReader.getClusterState();
    //check if we have disabled autoAddReplicas cluster wide
    String autoAddReplicas = zkStateReader.getClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, (String) null);
    if (autoAddReplicas != null && autoAddReplicas.equals("false")) {
        return;
    }
    if (clusterState != null) {
        if (clusterState.getZkClusterStateVersion() != null && clusterState.getZkClusterStateVersion().equals(lastClusterStateVersion) && baseUrlForBadNodes.size() == 0 && liveNodes.equals(clusterState.getLiveNodes())) {
            // nothing has changed, no work to do
            return;
        }
        liveNodes = clusterState.getLiveNodes();
        lastClusterStateVersion = clusterState.getZkClusterStateVersion();
        Map<String, DocCollection> collections = clusterState.getCollectionsMap();
        for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
            log.debug("look at collection={}", entry.getKey());
            DocCollection docCollection = entry.getValue();
            if (!docCollection.getAutoAddReplicas()) {
                log.debug("Collection {} is not setup to use autoAddReplicas, skipping..", docCollection.getName());
                continue;
            }
            if (docCollection.getReplicationFactor() == null) {
                log.debug("Skipping collection because it has no defined replicationFactor, name={}", docCollection.getName());
                continue;
            }
            log.debug("Found collection, name={} replicationFactor={}", entry.getKey(), docCollection.getReplicationFactor());
            Collection<Slice> slices = docCollection.getSlices();
            for (Slice slice : slices) {
                if (slice.getState() == Slice.State.ACTIVE) {
                    final Collection<DownReplica> downReplicas = new ArrayList<DownReplica>();
                    int goodReplicas = findDownReplicasInSlice(clusterState, docCollection, slice, downReplicas);
                    log.debug("collection={} replicationFactor={} goodReplicaCount={}", docCollection.getName(), docCollection.getReplicationFactor(), goodReplicas);
                    if (downReplicas.size() > 0 && goodReplicas < docCollection.getReplicationFactor()) {
                        // badReplicaMap.put(collection, badReplicas);
                        processBadReplicas(entry.getKey(), downReplicas);
                    } else if (goodReplicas > docCollection.getReplicationFactor()) {
                        log.debug("There are too many replicas");
                    }
                }
            }
        }
    }
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) Slice(org.apache.solr.common.cloud.Slice) ArrayList(java.util.ArrayList) DocCollection(org.apache.solr.common.cloud.DocCollection) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap)
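
Before any per-collection work, doWork consults the cluster-wide autoAddReplicas property through ZkStateReader.getClusterProperty. As a minimal sketch (not part of the example above), this is how that switch could be flipped from SolrJ, assuming a SolrCloud cluster whose embedded ZooKeeper runs at localhost:9983 (the address is a placeholder):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.ZkStateReader;

public class DisableAutoAddReplicas {
    public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
                .withZkHost("localhost:9983").build()) {
            // Setting autoAddReplicas to "false" makes doWork() return early;
            // setting it back to null removes the cluster property again.
            CollectionAdminRequest
                .setClusterProperty(ZkStateReader.AUTO_ADD_REPLICAS, "false")
                .process(client);
        }
    }
}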

Example 7 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class OverseerAutoReplicaFailoverThread method getBestCreateUrl.

/**
   * 
   * @return the best node to replace the badReplica on or null if there is no
   *         such node
   */
static String getBestCreateUrl(ZkStateReader zkStateReader, DownReplica badReplica, Integer maxCoreCount) {
    assert badReplica != null;
    assert badReplica.collection != null;
    assert badReplica.slice != null;
    log.debug("getBestCreateUrl for " + badReplica.replica);
    Map<String, Counts> counts = new HashMap<>();
    Set<String> unsuitableHosts = new HashSet<>();
    Set<String> liveNodes = new HashSet<>(zkStateReader.getClusterState().getLiveNodes());
    Map<String, Integer> coresPerNode = new HashMap<>();
    ClusterState clusterState = zkStateReader.getClusterState();
    if (clusterState != null) {
        Map<String, DocCollection> collections = clusterState.getCollectionsMap();
        for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
            String collection = entry.getKey();
            log.debug("look at collection {} as possible create candidate", collection);
            DocCollection docCollection = entry.getValue();
            // TODO - only operate on collections with sharedfs failover = true ??
            Collection<Slice> slices = docCollection.getSlices();
            for (Slice slice : slices) {
                // only look at active shards
                if (slice.getState() == Slice.State.ACTIVE) {
                    log.debug("look at slice {} for collection {} as possible create candidate", slice.getName(), collection);
                    Collection<Replica> replicas = slice.getReplicas();
                    for (Replica replica : replicas) {
                        liveNodes.remove(replica.getNodeName());
                        String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
                        if (coresPerNode.containsKey(baseUrl)) {
                            Integer nodeCount = coresPerNode.get(baseUrl);
                            // nodeCount++ here would write the stale value back into the map
                            coresPerNode.put(baseUrl, nodeCount + 1);
                        } else {
                            coresPerNode.put(baseUrl, 1);
                        }
                        if (baseUrl.equals(badReplica.replica.getStr(ZkStateReader.BASE_URL_PROP))) {
                            continue;
                        }
                        // on a live node?
                        log.debug("collection={} nodename={} livenodes={}", collection, replica.getNodeName(), clusterState.getLiveNodes());
                        boolean live = clusterState.liveNodesContain(replica.getNodeName());
                        log.debug("collection={} look at replica {} as possible create candidate, live={}", collection, replica.getName(), live);
                        if (live) {
                            Counts cnt = counts.get(baseUrl);
                            if (cnt == null) {
                                cnt = new Counts();
                            }
                            if (badReplica.collection.getName().equals(collection)) {
                                cnt.negRankingWeight += 3;
                                cnt.collectionShardsOnNode += 1;
                            } else {
                                cnt.negRankingWeight += 1;
                            }
                            if (badReplica.collection.getName().equals(collection) && badReplica.slice.getName().equals(slice.getName())) {
                                cnt.ourReplicas++;
                            }
                            Integer maxShardsPerNode = badReplica.collection.getMaxShardsPerNode();
                            if (maxShardsPerNode == null) {
                                log.warn("maxShardsPerNode is not defined for collection, name=" + badReplica.collection.getName());
                                maxShardsPerNode = Integer.MAX_VALUE;
                            }
                            log.debug("collection={} node={} maxShardsPerNode={} maxCoresPerNode={} potential hosts={}", collection, baseUrl, maxShardsPerNode, maxCoreCount, cnt);
                            Collection<Replica> badSliceReplicas = null;
                            DocCollection c = clusterState.getCollection(badReplica.collection.getName());
                            if (c != null) {
                                Slice s = c.getSlice(badReplica.slice.getName());
                                if (s != null) {
                                    badSliceReplicas = s.getReplicas();
                                }
                            }
                            boolean alreadyExistsOnNode = replicaAlreadyExistsOnNode(zkStateReader.getClusterState(), badSliceReplicas, badReplica, baseUrl);
                            if (unsuitableHosts.contains(baseUrl) || alreadyExistsOnNode || cnt.collectionShardsOnNode >= maxShardsPerNode || (maxCoreCount != null && coresPerNode.get(baseUrl) >= maxCoreCount)) {
                                counts.remove(baseUrl);
                                unsuitableHosts.add(baseUrl);
                                log.debug("not a candidate node, collection={} node={} max shards per node={} good replicas={}", collection, baseUrl, maxShardsPerNode, cnt);
                            } else {
                                counts.put(baseUrl, cnt);
                                log.debug("is a candidate node, collection={} node={} max shards per node={} good replicas={}", collection, baseUrl, maxShardsPerNode, cnt);
                            }
                        }
                    }
                }
            }
        }
    }
    for (String node : liveNodes) {
        counts.put(zkStateReader.getBaseUrlForNodeName(node), new Counts(0, 0));
    }
    if (counts.size() == 0) {
        log.debug("no suitable hosts found for getBestCreateUrl for collection={}", badReplica.collection.getName());
        return null;
    }
    ValueComparator vc = new ValueComparator(counts);
    Map<String, Counts> sortedCounts = new TreeMap<String, Counts>(vc);
    sortedCounts.putAll(counts);
    log.debug("empty nodes={} for collection={}", liveNodes, badReplica.collection.getName());
    log.debug("sorted hosts={} for collection={}", sortedCounts, badReplica.collection.getName());
    log.debug("unsuitable hosts={} for collection={}", unsuitableHosts, badReplica.collection.getName());
    return sortedCounts.keySet().iterator().next();
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) Replica(org.apache.solr.common.cloud.Replica) Slice(org.apache.solr.common.cloud.Slice) DocCollection(org.apache.solr.common.cloud.DocCollection) Map(java.util.Map) HashSet(java.util.HashSet)
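
getBestCreateUrl ranks candidate nodes by putting per-node Counts into a TreeMap whose comparator (ValueComparator) orders keys by their values, then takes the first key. A self-contained sketch of that sort-a-map-by-value pattern, with plain integers standing in for Counts and placeholder URLs:

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class SortNodesByLoad {
    public static void main(String[] args) {
        Map<String, Integer> load = new HashMap<>();
        load.put("http://node1:8983/solr", 3);
        load.put("http://node2:8983/solr", 1);
        load.put("http://node3:8983/solr", 2);

        // The comparator closes over the backing map and orders keys by value,
        // least-loaded first. Ties must not compare as 0, or TreeMap would
        // silently collapse the tied keys into a single entry.
        TreeMap<String, Integer> sorted = new TreeMap<>((a, b) -> {
            int byLoad = Integer.compare(load.get(a), load.get(b));
            return byLoad != 0 ? byLoad : a.compareTo(b);
        });
        sorted.putAll(load);

        // The first key is the best create target, mirroring
        // sortedCounts.keySet().iterator().next() above.
        System.out.println(sorted.firstKey()); // http://node2:8983/solr
    }
}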

Example 8 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class ReplaceNodeCmd method call.

@Override
public void call(ClusterState state, ZkNodeProps message, NamedList results) throws Exception {
    ZkStateReader zkStateReader = ocmh.zkStateReader;
    ocmh.checkRequired(message, "source", "target");
    String source = message.getStr("source");
    String target = message.getStr("target");
    String async = message.getStr("async");
    boolean parallel = message.getBool("parallel", false);
    ClusterState clusterState = zkStateReader.getClusterState();
    if (!clusterState.liveNodesContain(source)) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source Node: " + source + " is not live");
    }
    if (!clusterState.liveNodesContain(target)) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target Node: " + target + " is not live");
    }
    List<ZkNodeProps> sourceReplicas = getReplicasOfNode(source, clusterState);
    List<ZkNodeProps> createdReplicas = new ArrayList<>();
    AtomicBoolean anyOneFailed = new AtomicBoolean(false);
    CountDownLatch countDownLatch = new CountDownLatch(sourceReplicas.size());
    for (ZkNodeProps sourceReplica : sourceReplicas) {
        NamedList nl = new NamedList();
        log.info("Going to create replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
        ZkNodeProps msg = sourceReplica.plus("parallel", String.valueOf(parallel)).plus(CoreAdminParams.NODE, target);
        if (async != null)
            msg.getProperties().put(ASYNC, async);
        final ZkNodeProps addedReplica = ocmh.addReplica(clusterState, msg, nl, () -> {
            countDownLatch.countDown();
            if (nl.get("failure") != null) {
                String errorString = String.format(Locale.ROOT, "Failed to create replica for collection=%s shard=%s" + " on node=%s", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
                log.warn(errorString);
                // and exit
                synchronized (results) {
                    results.add("failure", errorString);
                    anyOneFailed.set(true);
                }
            } else {
                log.debug("Successfully created replica for collection={} shard={} on node={}", sourceReplica.getStr(COLLECTION_PROP), sourceReplica.getStr(SHARD_ID_PROP), target);
            }
        });
        if (addedReplica != null) {
            createdReplicas.add(addedReplica);
        }
    }
    log.debug("Waiting for replace node action to complete");
    countDownLatch.await(5, TimeUnit.MINUTES);
    log.debug("Finished waiting for replace node action to complete");
    if (anyOneFailed.get()) {
        log.info("Failed to create some replicas. Cleaning up all replicas on target node");
        CountDownLatch cleanupLatch = new CountDownLatch(createdReplicas.size());
        for (ZkNodeProps createdReplica : createdReplicas) {
            NamedList deleteResult = new NamedList();
            try {
                ocmh.deleteReplica(zkStateReader.getClusterState(), createdReplica.plus("parallel", "true"), deleteResult, () -> {
                    cleanupLatch.countDown();
                    if (deleteResult.get("failure") != null) {
                        synchronized (results) {
                            results.add("failure", "Could not cleanup, because of : " + deleteResult.get("failure"));
                        }
                    }
                });
            } catch (KeeperException e) {
                cleanupLatch.countDown();
                log.warn("Error deleting replica ", e);
            } catch (Exception e) {
                log.warn("Error deleting replica ", e);
                cleanupLatch.countDown();
                throw e;
            }
        }
        cleanupLatch.await(5, TimeUnit.MINUTES);
    }
    // reaching this point means all replicas were recreated successfully,
    // so now clean up the replicas on the source node
    DeleteNodeCmd.cleanupReplicas(results, state, sourceReplicas, ocmh, source, async);
    results.add("success", "REPLACENODE action completed successfully from  : " + source + " to : " + target);
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) NamedList(org.apache.solr.common.util.NamedList) ZkNodeProps(org.apache.solr.common.cloud.ZkNodeProps) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) KeeperException(org.apache.zookeeper.KeeperException) SolrException(org.apache.solr.common.SolrException) ZkStateReader(org.apache.solr.common.cloud.ZkStateReader) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)
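
ReplaceNodeCmd.call is normally reached through the Collections API rather than invoked directly; the source, target, parallel and async values it reads from the message arrive as plain request parameters. A hedged sketch of triggering it over HTTP, where the host and node names are placeholders (node names use Solr's internal host_port_context form):

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Scanner;

public class ReplaceNodeRequest {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8983/solr/admin/collections"
                + "?action=REPLACENODE"
                + "&source=127.0.0.1:7574_solr"   // node being drained
                + "&target=127.0.0.1:8984_solr"   // node receiving the replicas
                + "&parallel=true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (InputStream in = conn.getInputStream();
             Scanner body = new Scanner(in, "UTF-8").useDelimiter("\\A")) {
            System.out.println(body.next()); // response from the overseer
        }
    }
}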

Example 9 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class HttpShardHandler method prepDistributed.

@Override
public void prepDistributed(ResponseBuilder rb) {
    final SolrQueryRequest req = rb.req;
    final SolrParams params = req.getParams();
    final String shards = params.get(ShardParams.SHARDS);
    // since the cost of grabbing cloud state is still up in the air, we grab it only
    // if we need it.
    ClusterState clusterState = null;
    Map<String, Slice> slices = null;
    CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor();
    CloudDescriptor cloudDescriptor = coreDescriptor.getCloudDescriptor();
    ZkController zkController = req.getCore().getCoreContainer().getZkController();
    final ReplicaListTransformer replicaListTransformer = httpShardHandlerFactory.getReplicaListTransformer(req);
    if (shards != null) {
        List<String> lst = StrUtils.splitSmart(shards, ",", true);
        rb.shards = lst.toArray(new String[lst.size()]);
        rb.slices = new String[rb.shards.length];
        if (zkController != null) {
            // figure out which shards are slices
            for (int i = 0; i < rb.shards.length; i++) {
                if (rb.shards[i].indexOf('/') < 0) {
                    // this is a logical shard
                    rb.slices[i] = rb.shards[i];
                    rb.shards[i] = null;
                }
            }
        }
    } else if (zkController != null) {
        // we weren't provided with an explicit list of slices to query via "shards", so use the cluster state
        clusterState = zkController.getClusterState();
        String shardKeys = params.get(ShardParams._ROUTE_);
        // This will be the complete list of slices we need to query for this request.
        slices = new HashMap<>();
        // we need to find out what collections this request is for.
        // A comma-separated list of specified collections.
        // Eg: "collection1,collection2,collection3"
        String collections = params.get("collection");
        if (collections != null) {
            // If there were one or more collections specified in the query, split
            // each parameter and store as a separate member of a List.
            List<String> collectionList = StrUtils.splitSmart(collections, ",", true);
            // In turn, retrieve the slices that cover each collection from the
            // cloud state and add them to the Map 'slices'.
            for (String collectionName : collectionList) {
                // The original code produced <collection-name>_<shard-name> when the collections
                // parameter was specified (see ClientUtils.appendMap)
                // Is this necessary if only one collection is specified?
                // i.e. should we change multiCollection to collectionList.size() > 1?
                addSlices(slices, clusterState, params, collectionName, shardKeys, true);
            }
        } else {
            // just this collection
            String collectionName = cloudDescriptor.getCollectionName();
            addSlices(slices, clusterState, params, collectionName, shardKeys, false);
        }
        // Store the logical slices in the ResponseBuilder and create a new
        // String array to hold the physical shards (which will be mapped
        // later).
        rb.slices = slices.keySet().toArray(new String[slices.size()]);
        rb.shards = new String[rb.slices.length];
    }
    if (zkController != null) {
        // Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
        // and make it a non-distributed request.
        String ourSlice = cloudDescriptor.getShardId();
        String ourCollection = cloudDescriptor.getCollectionName();
        // Some requests may only be fulfilled by replicas of type Replica.Type.NRT
        boolean onlyNrtReplicas = Boolean.TRUE == req.getContext().get(ONLY_NRT_REPLICAS);
        if (rb.slices.length == 1 && rb.slices[0] != null && // handle the <collection>_<slice> format
        (rb.slices[0].equals(ourSlice) || rb.slices[0].equals(ourCollection + "_" + ourSlice)) && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
            // currently just a debugging parameter to check distrib search on a single node
            boolean shortCircuit = params.getBool("shortCircuit", true);
            String targetHandler = params.get(ShardParams.SHARDS_QT);
            // if a different handler is specified, don't short-circuit
            shortCircuit = shortCircuit && targetHandler == null;
            if (shortCircuit) {
                rb.isDistrib = false;
                rb.shortCircuitedURL = ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
                return;
            }
        // We shouldn't need to do anything to handle "shard.rows" since it was previously meant to be an optimization?
        }
        for (int i = 0; i < rb.shards.length; i++) {
            final List<String> shardUrls;
            if (rb.shards[i] != null) {
                shardUrls = StrUtils.splitSmart(rb.shards[i], "|", true);
                replicaListTransformer.transform(shardUrls);
            } else {
                if (clusterState == null) {
                    clusterState = zkController.getClusterState();
                    slices = clusterState.getSlicesMap(cloudDescriptor.getCollectionName());
                }
                String sliceName = rb.slices[i];
                Slice slice = slices.get(sliceName);
                if (slice == null) {
                    // Treat this the same as "all servers down" for a slice, and let things continue
                    // if partial results are acceptable
                    rb.shards[i] = "";
                    continue;
                // throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such shard: " + sliceName);
                }
                final Predicate<Replica> isShardLeader = new Predicate<Replica>() {

                    private Replica shardLeader = null;

                    @Override
                    public boolean test(Replica replica) {
                        if (shardLeader == null) {
                            try {
                                shardLeader = zkController.getZkStateReader().getLeaderRetry(cloudDescriptor.getCollectionName(), slice.getName());
                            } catch (InterruptedException e) {
                                throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Exception finding leader for shard " + slice.getName() + " in collection " + cloudDescriptor.getCollectionName(), e);
                            } catch (SolrException e) {
                                if (log.isDebugEnabled()) {
                                    log.debug("Exception finding leader for shard {} in collection {}. Collection State: {}", slice.getName(), cloudDescriptor.getCollectionName(), zkController.getZkStateReader().getClusterState().getCollectionOrNull(cloudDescriptor.getCollectionName()));
                                }
                                throw e;
                            }
                        }
                        return replica.getName().equals(shardLeader.getName());
                    }
                };
                final List<Replica> eligibleSliceReplicas = collectEligibleReplicas(slice, clusterState, onlyNrtReplicas, isShardLeader);
                replicaListTransformer.transform(eligibleSliceReplicas);
                shardUrls = new ArrayList<>(eligibleSliceReplicas.size());
                for (Replica replica : eligibleSliceReplicas) {
                    String url = ZkCoreNodeProps.getCoreUrl(replica);
                    shardUrls.add(url);
                }
                if (shardUrls.isEmpty()) {
                    boolean tolerant = rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false);
                    if (!tolerant) {
                        // stop the check when there are no replicas available for a shard
                        throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "no servers hosting shard: " + rb.slices[i]);
                    }
                }
            }
            // And now recreate the | delimited list of equivalent servers
            rb.shards[i] = createSliceShardsStr(shardUrls);
        }
    }
    String shards_rows = params.get(ShardParams.SHARDS_ROWS);
    if (shards_rows != null) {
        rb.shards_rows = Integer.parseInt(shards_rows);
    }
    String shards_start = params.get(ShardParams.SHARDS_START);
    if (shards_start != null) {
        rb.shards_start = Integer.parseInt(shards_start);
    }
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) CoreDescriptor(org.apache.solr.core.CoreDescriptor) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) Predicate(java.util.function.Predicate) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Slice(org.apache.solr.common.cloud.Slice) ZkController(org.apache.solr.cloud.ZkController) SolrParams(org.apache.solr.common.params.SolrParams) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List) SolrException(org.apache.solr.common.SolrException)
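
The explicit shards parameter that the first branch parses has a two-level format: shard entries separated by commas, and equivalent replica URLs within one entry separated by '|' (the same delimiters splitSmart and createSliceShardsStr use above). A small sketch that assembles such a value, with placeholder URLs:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class ShardsParamBuilder {
    public static void main(String[] args) {
        // Outer list: one entry per shard. Inner list: equivalent replicas,
        // any one of which may serve that shard of the request.
        List<List<String>> shards = Arrays.asList(
            Arrays.asList("http://host1:8983/solr/coll_shard1_replica1",
                          "http://host2:8983/solr/coll_shard1_replica2"),
            Arrays.asList("http://host3:8983/solr/coll_shard2_replica1"));

        String shardsParam = shards.stream()
            .map(replicas -> String.join("|", replicas))
            .collect(Collectors.joining(","));

        // Entries without a '/' would instead be treated as logical slice
        // names, as the zkController branch above shows.
        System.out.println(shardsParam);
    }
}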

Example 10 with ClusterState

use of org.apache.solr.common.cloud.ClusterState in project lucene-solr by apache.

the class SplitOp method execute.

@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
    SolrParams params = it.req.getParams();
    List<DocRouter.Range> ranges = null;
    String[] pathsArr = params.getParams(PATH);
    // ranges=a-b,c-d,e-f
    String rangesStr = params.get(CoreAdminParams.RANGES);
    if (rangesStr != null) {
        String[] rangesArr = rangesStr.split(",");
        if (rangesArr.length == 0) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index");
        } else {
            ranges = new ArrayList<>(rangesArr.length);
            for (String r : rangesArr) {
                try {
                    ranges.add(DocRouter.DEFAULT.fromString(r));
                } catch (Exception e) {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception parsing hexadecimal hash range: " + r, e);
                }
            }
        }
    }
    String splitKey = params.get("split.key");
    String[] newCoreNames = params.getParams("targetCore");
    String cname = params.get(CoreAdminParams.CORE, "");
    if ((pathsArr == null || pathsArr.length == 0) && (newCoreNames == null || newCoreNames.length == 0)) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified");
    }
    log.info("Invoked split action for core: " + cname);
    SolrCore core = it.handler.coreContainer.getCore(cname);
    SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
    List<SolrCore> newCores = null;
    try {
        // TODO: allow use of rangesStr in the future
        List<String> paths = null;
        int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length;
        DocRouter router = null;
        String routeFieldName = null;
        if (it.handler.coreContainer.isZooKeeperAware()) {
            ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
            String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
            DocCollection collection = clusterState.getCollection(collectionName);
            String sliceName = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
            Slice slice = collection.getSlice(sliceName);
            router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
            if (ranges == null) {
                DocRouter.Range currentRange = slice.getRange();
                ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null;
            }
            // for back-compat with Solr 4.4
            Object routerObj = collection.get(DOC_ROUTER);
            if (routerObj instanceof Map) { // instanceof already rejects null
                Map routerProps = (Map) routerObj;
                routeFieldName = (String) routerProps.get("field");
            }
        }
        if (pathsArr == null) {
            newCores = new ArrayList<>(partitions);
            for (String newCoreName : newCoreNames) {
                SolrCore newcore = it.handler.coreContainer.getCore(newCoreName);
                if (newcore != null) {
                    newCores.add(newcore);
                } else {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " expected but doesn't exist.");
                }
            }
        } else {
            paths = Arrays.asList(pathsArr);
        }
        SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router, routeFieldName, splitKey);
        core.getUpdateHandler().split(cmd);
    // After the split has completed, someone (here?) should start the process of replaying the buffered updates.
    } catch (Exception e) {
        log.error("ERROR executing split:", e);
        throw new RuntimeException(e);
    } finally {
        if (req != null)
            req.close();
        if (core != null)
            core.close();
        if (newCores != null) {
            for (SolrCore newCore : newCores) {
                newCore.close();
            }
        }
    }
}
Also used : SplitIndexCommand(org.apache.solr.update.SplitIndexCommand) ClusterState(org.apache.solr.common.cloud.ClusterState) SolrCore(org.apache.solr.core.SolrCore) SolrException(org.apache.solr.common.SolrException) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Slice(org.apache.solr.common.cloud.Slice) DocRouter(org.apache.solr.common.cloud.DocRouter) SolrParams(org.apache.solr.common.params.SolrParams) DocCollection(org.apache.solr.common.cloud.DocCollection) Map(java.util.Map)
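
When no explicit ranges are given, SplitOp derives them by calling router.partitionRange on the slice's current hash range; the ranges parameter itself carries the hexadecimal min-max form that DocRouter.DEFAULT.fromString parses. A sketch, under the assumption of splitting the router's full hash range three ways, that produces such a parameter value:

import java.util.List;
import java.util.stream.Collectors;
import org.apache.solr.common.cloud.DocRouter;

public class RangesParamSketch {
    public static void main(String[] args) {
        DocRouter router = DocRouter.DEFAULT;

        // Same call SplitOp makes, here over the router's full hash range.
        List<DocRouter.Range> ranges = router.partitionRange(3, router.fullRange());

        // Range.toString() renders the hex "min-max" form that fromString()
        // parses back on the receiving core.
        String rangesParam = ranges.stream()
            .map(DocRouter.Range::toString)
            .collect(Collectors.joining(","));
        System.out.println(rangesParam); // pass as &ranges=... to the SPLIT action
    }
}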

Aggregations

ClusterState (org.apache.solr.common.cloud.ClusterState) 122
Slice (org.apache.solr.common.cloud.Slice) 78
Replica (org.apache.solr.common.cloud.Replica) 65
ZkStateReader (org.apache.solr.common.cloud.ZkStateReader) 56
DocCollection (org.apache.solr.common.cloud.DocCollection) 49
HashMap (java.util.HashMap) 42
ArrayList (java.util.ArrayList) 36
Map (java.util.Map) 25
IOException (java.io.IOException) 20
Test (org.junit.Test) 18
HashSet (java.util.HashSet) 17
SolrException (org.apache.solr.common.SolrException) 16
HttpSolrClient (org.apache.solr.client.solrj.impl.HttpSolrClient) 15
SolrQuery (org.apache.solr.client.solrj.SolrQuery) 13
JettySolrRunner (org.apache.solr.client.solrj.embedded.JettySolrRunner) 13
CloudSolrClient (org.apache.solr.client.solrj.impl.CloudSolrClient) 13
ZkCoreNodeProps (org.apache.solr.common.cloud.ZkCoreNodeProps) 13
ZkNodeProps (org.apache.solr.common.cloud.ZkNodeProps) 13
List (java.util.List) 12
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams) 12