Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.
The class ExpandComponent, method process.
@SuppressWarnings("unchecked")
@Override
public void process(ResponseBuilder rb) throws IOException {
if (!rb.doExpand) {
return;
}
SolrQueryRequest req = rb.req;
SolrParams params = req.getParams();
String field = params.get(ExpandParams.EXPAND_FIELD);
String hint = null;
if (field == null) {
List<Query> filters = rb.getFilters();
if (filters != null) {
for (Query q : filters) {
if (q instanceof CollapsingQParserPlugin.CollapsingPostFilter) {
CollapsingQParserPlugin.CollapsingPostFilter cp = (CollapsingQParserPlugin.CollapsingPostFilter) q;
field = cp.getField();
hint = cp.hint;
}
}
}
}
if (field == null) {
throw new IOException("Expand field is null.");
}
String sortParam = params.get(ExpandParams.EXPAND_SORT);
String[] fqs = params.getParams(ExpandParams.EXPAND_FQ);
String qs = params.get(ExpandParams.EXPAND_Q);
int limit = params.getInt(ExpandParams.EXPAND_ROWS, 5);
Sort sort = null;
if (sortParam != null) {
sort = SortSpecParsing.parseSortSpec(sortParam, rb.req).getSort();
}
Query query;
if (qs == null) {
query = rb.getQuery();
} else {
try {
QParser parser = QParser.getParser(qs, req);
query = parser.getQuery();
} catch (Exception e) {
throw new IOException(e);
}
}
List<Query> newFilters = new ArrayList<>();
if (fqs == null) {
List<Query> filters = rb.getFilters();
if (filters != null) {
for (Query q : filters) {
if (!(q instanceof CollapsingQParserPlugin.CollapsingPostFilter)) {
newFilters.add(q);
}
}
}
} else {
try {
for (String fq : fqs) {
if (fq != null && fq.trim().length() != 0 && !fq.equals("*:*")) {
QParser fqp = QParser.getParser(fq, req);
newFilters.add(fqp.getQuery());
}
}
} catch (Exception e) {
throw new IOException(e);
}
}
SolrIndexSearcher searcher = req.getSearcher();
LeafReader reader = searcher.getSlowAtomicReader();
SchemaField schemaField = searcher.getSchema().getField(field);
FieldType fieldType = schemaField.getType();
SortedDocValues values = null;
long nullValue = 0L;
if (fieldType instanceof StrField) {
//Get The Top Level SortedDocValues
if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
Map<String, UninvertingReader.Type> mapping = new HashMap<>();
mapping.put(field, UninvertingReader.Type.SORTED);
UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
values = uninvertingReader.getSortedDocValues(field);
} else {
values = DocValues.getSorted(reader, field);
}
} else {
//Get the nullValue for the numeric collapse field
String defaultValue = searcher.getSchema().getField(field).getDefaultValue();
final NumberType numType = fieldType.getNumberType();
// The collapse component has already validated that the numeric collapse field is a 32-bit type (int/float),
// so we don't need to handle invalid 64-bit field types here.
if (defaultValue != null) {
if (numType == NumberType.INTEGER) {
nullValue = Long.parseLong(defaultValue);
} else if (numType == NumberType.FLOAT) {
nullValue = Float.floatToIntBits(Float.parseFloat(defaultValue));
}
} else if (NumberType.FLOAT.equals(numType)) {
// Integer case already handled by nullValue defaulting to 0
nullValue = Float.floatToIntBits(0.0f);
}
}
FixedBitSet groupBits = null;
LongHashSet groupSet = null;
DocList docList = rb.getResults().docList;
IntHashSet collapsedSet = new IntHashSet(docList.size() * 2);
//Gather the groups for the current page of documents
DocIterator idit = docList.iterator();
int[] globalDocs = new int[docList.size()];
int docsIndex = -1;
while (idit.hasNext()) {
globalDocs[++docsIndex] = idit.nextDoc();
}
Arrays.sort(globalDocs);
Query groupQuery = null;
/*
* This code gathers the group information for the current page.
*/
List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
if (contexts.size() == 0) {
//When no context is available we can skip the expanding
return;
}
int currentContext = 0;
int currentDocBase = contexts.get(currentContext).docBase;
int nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
IntObjectHashMap<BytesRef> ordBytes = null;
if (values != null) {
groupBits = new FixedBitSet(values.getValueCount());
MultiDocValues.OrdinalMap ordinalMap = null;
SortedDocValues[] sortedDocValues = null;
LongValues segmentOrdinalMap = null;
SortedDocValues currentValues = null;
if (values instanceof MultiDocValues.MultiSortedDocValues) {
ordinalMap = ((MultiDocValues.MultiSortedDocValues) values).mapping;
sortedDocValues = ((MultiDocValues.MultiSortedDocValues) values).values;
currentValues = sortedDocValues[currentContext];
segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
}
int count = 0;
ordBytes = new IntObjectHashMap<>();
for (int i = 0; i < globalDocs.length; i++) {
int globalDoc = globalDocs[i];
while (globalDoc >= nextDocBase) {
currentContext++;
currentDocBase = contexts.get(currentContext).docBase;
nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
if (ordinalMap != null) {
currentValues = sortedDocValues[currentContext];
segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
}
}
int contextDoc = globalDoc - currentDocBase;
if (ordinalMap != null) {
if (contextDoc > currentValues.docID()) {
currentValues.advance(contextDoc);
}
if (contextDoc == currentValues.docID()) {
int ord = currentValues.ordValue();
++count;
BytesRef ref = currentValues.lookupOrd(ord);
ord = (int) segmentOrdinalMap.get(ord);
ordBytes.put(ord, BytesRef.deepCopyOf(ref));
groupBits.set(ord);
collapsedSet.add(globalDoc);
}
} else {
if (globalDoc > values.docID()) {
values.advance(globalDoc);
}
if (globalDoc == values.docID()) {
int ord = values.ordValue();
++count;
BytesRef ref = values.lookupOrd(ord);
ordBytes.put(ord, BytesRef.deepCopyOf(ref));
groupBits.set(ord);
collapsedSet.add(globalDoc);
}
}
}
if (count > 0 && count < 200) {
try {
groupQuery = getGroupQuery(field, count, ordBytes);
} catch (Exception e) {
throw new IOException(e);
}
}
} else {
groupSet = new LongHashSet(docList.size());
NumericDocValues collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
int count = 0;
for (int i = 0; i < globalDocs.length; i++) {
int globalDoc = globalDocs[i];
while (globalDoc >= nextDocBase) {
currentContext++;
currentDocBase = contexts.get(currentContext).docBase;
nextDocBase = currentContext + 1 < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
}
int contextDoc = globalDoc - currentDocBase;
int valueDocID = collapseValues.docID();
if (valueDocID < contextDoc) {
valueDocID = collapseValues.advance(contextDoc);
}
long value;
if (valueDocID == contextDoc) {
value = collapseValues.longValue();
} else {
value = 0;
}
if (value != nullValue) {
++count;
groupSet.add(value);
collapsedSet.add(globalDoc);
}
}
if (count > 0 && count < 200) {
if (fieldType.isPointField()) {
groupQuery = getPointGroupQuery(schemaField, count, groupSet);
} else {
groupQuery = getGroupQuery(field, fieldType, count, groupSet);
}
}
}
Collector collector;
if (sort != null)
sort = sort.rewrite(searcher);
Collector groupExpandCollector = null;
if (values != null) {
//Get The Top Level SortedDocValues again so we can re-iterate:
if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
Map<String, UninvertingReader.Type> mapping = new HashMap<>();
mapping.put(field, UninvertingReader.Type.SORTED);
UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
values = uninvertingReader.getSortedDocValues(field);
} else {
values = DocValues.getSorted(reader, field);
}
groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit, sort);
} else {
groupExpandCollector = new NumericGroupExpandCollector(field, nullValue, groupSet, collapsedSet, limit, sort);
}
if (groupQuery != null) {
//Limits the results to documents that are in the same group as the documents in the page.
newFilters.add(groupQuery);
}
SolrIndexSearcher.ProcessedFilter pfilter = searcher.getProcessedFilter(null, newFilters);
if (pfilter.postFilter != null) {
pfilter.postFilter.setLastDelegate(groupExpandCollector);
collector = pfilter.postFilter;
} else {
collector = groupExpandCollector;
}
if (pfilter.filter == null) {
searcher.search(query, collector);
} else {
Query q = new BooleanQuery.Builder().add(query, Occur.MUST).add(pfilter.filter, Occur.FILTER).build();
searcher.search(q, collector);
}
LongObjectMap<Collector> groups = ((GroupCollector) groupExpandCollector).getGroups();
NamedList outMap = new SimpleOrderedMap();
CharsRefBuilder charsRef = new CharsRefBuilder();
for (LongObjectCursor<Collector> cursor : groups) {
long groupValue = cursor.key;
TopDocsCollector<?> topDocsCollector = TopDocsCollector.class.cast(cursor.value);
TopDocs topDocs = topDocsCollector.topDocs();
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
if (scoreDocs.length > 0) {
int[] docs = new int[scoreDocs.length];
float[] scores = new float[scoreDocs.length];
for (int i = 0; i < docs.length; i++) {
ScoreDoc scoreDoc = scoreDocs[i];
docs[i] = scoreDoc.doc;
scores[i] = scoreDoc.score;
}
DocSlice slice = new DocSlice(0, docs.length, docs, scores, topDocs.totalHits, topDocs.getMaxScore());
if (fieldType instanceof StrField) {
final BytesRef bytesRef = ordBytes.get((int) groupValue);
fieldType.indexedToReadable(bytesRef, charsRef);
String group = charsRef.toString();
outMap.add(group, slice);
} else {
outMap.add(numericToString(fieldType, groupValue), slice);
}
}
}
rb.rsp.add("expanded", outMap);
}
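For context, process() only does anything when the request carries the expand parameters it reads from SolrParams. The following SolrJ sketch shows one way such a request could be built on the client side; the Solr URL, collection, and field names (group_s, price_f) are illustrative assumptions, not taken from the source.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class ExpandRequestSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
      SolrQuery q = new SolrQuery("*:*");
      // Collapse on a string field; when expand.field is absent, process() pulls the
      // field (and the hint) from this CollapsingPostFilter in rb.getFilters().
      q.addFilterQuery("{!collapse field=group_s hint=top_fc}");
      q.set("expand", "true");              // enables rb.doExpand
      q.set("expand.rows", "5");            // ExpandParams.EXPAND_ROWS (default 5 above)
      q.set("expand.sort", "price_f asc");  // ExpandParams.EXPAND_SORT
      QueryResponse rsp = client.query(q);
      // The groups built by process() come back under the "expanded" section.
      System.out.println(rsp.getExpandedResults());
    }
  }
}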
Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.
The class HighlightComponent, method process.
@Override
public void process(ResponseBuilder rb) throws IOException {
if (rb.doHighlights) {
SolrQueryRequest req = rb.req;
SolrParams params = req.getParams();
SolrHighlighter highlighter = getHighlighter(params);
//TODO: get from builder by default?
String[] defaultHighlightFields = rb.getQparser() != null ? rb.getQparser().getDefaultHighlightFields() : null;
Query highlightQuery = rb.getHighlightQuery();
if (highlightQuery == null) {
if (rb.getQparser() != null) {
try {
highlightQuery = rb.getQparser().getHighlightQuery();
rb.setHighlightQuery(highlightQuery);
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
} else {
highlightQuery = rb.getQuery();
rb.setHighlightQuery(highlightQuery);
}
}
// No highlighting if there is no query -- consider q.alt=*:*
if (highlightQuery != null) {
NamedList sumData = highlighter.doHighlighting(rb.getResults().docList, highlightQuery, req, defaultHighlightFields);
if (sumData != null) {
// TODO ???? add this directly to the response?
rb.rsp.add("highlighting", sumData);
}
}
}
}
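As a rough illustration of the parameters that make rb.doHighlights true and feed doHighlighting(), here is a hedged SolrJ sketch; the query, field name, and Solr URL are assumptions for the example only.

import java.util.List;
import java.util.Map;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class HighlightRequestSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr/techproducts").build()) {
      SolrQuery q = new SolrQuery("name:memory"); // illustrative query
      q.setHighlight(true);        // hl=true, which drives rb.doHighlights
      q.addHighlightField("name"); // hl.fl
      q.setHighlightSnippets(2);   // hl.snippets
      QueryResponse rsp = client.query(q);
      // Mirrors the "highlighting" section added by rb.rsp.add("highlighting", sumData).
      Map<String, Map<String, List<String>>> hl = rsp.getHighlighting();
      System.out.println(hl);
    }
  }
}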
Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.
The class HttpShardHandler, method prepDistributed.
@Override
public void prepDistributed(ResponseBuilder rb) {
final SolrQueryRequest req = rb.req;
final SolrParams params = req.getParams();
final String shards = params.get(ShardParams.SHARDS);
// since the cost of grabbing cloud state is still up in the air, we grab it only
// if we need it.
ClusterState clusterState = null;
Map<String, Slice> slices = null;
CoreDescriptor coreDescriptor = req.getCore().getCoreDescriptor();
CloudDescriptor cloudDescriptor = coreDescriptor.getCloudDescriptor();
ZkController zkController = req.getCore().getCoreContainer().getZkController();
final ReplicaListTransformer replicaListTransformer = httpShardHandlerFactory.getReplicaListTransformer(req);
if (shards != null) {
List<String> lst = StrUtils.splitSmart(shards, ",", true);
rb.shards = lst.toArray(new String[lst.size()]);
rb.slices = new String[rb.shards.length];
if (zkController != null) {
// figure out which shards are slices
for (int i = 0; i < rb.shards.length; i++) {
if (rb.shards[i].indexOf('/') < 0) {
// this is a logical shard
rb.slices[i] = rb.shards[i];
rb.shards[i] = null;
}
}
}
} else if (zkController != null) {
// we weren't provided with an explicit list of slices to query via "shards", so use the cluster state
clusterState = zkController.getClusterState();
String shardKeys = params.get(ShardParams._ROUTE_);
// This will be the complete list of slices we need to query for this request.
slices = new HashMap<>();
// we need to find out what collections this request is for.
// A comma-separated list of specified collections.
// Eg: "collection1,collection2,collection3"
String collections = params.get("collection");
if (collections != null) {
// If there were one or more collections specified in the query, split
// each parameter and store as a separate member of a List.
List<String> collectionList = StrUtils.splitSmart(collections, ",", true);
// In turn, retrieve the slices that cover each collection from the cloud state and add them to the Map 'slices'.
for (String collectionName : collectionList) {
// The original code produced <collection-name>_<shard-name> when the collections
// parameter was specified (see ClientUtils.appendMap)
// Is this necessary if only one collection is specified?
// i.e. should we change multiCollection to collectionList.size() > 1?
addSlices(slices, clusterState, params, collectionName, shardKeys, true);
}
} else {
// just this collection
String collectionName = cloudDescriptor.getCollectionName();
addSlices(slices, clusterState, params, collectionName, shardKeys, false);
}
// Store the logical slices in the ResponseBuilder and create a new
// String array to hold the physical shards (which will be mapped
// later).
rb.slices = slices.keySet().toArray(new String[slices.size()]);
rb.shards = new String[rb.slices.length];
}
//
if (zkController != null) {
// Are we hosting the shard that this request is for, and are we active? If so, then handle it ourselves
// and make it a non-distributed request.
String ourSlice = cloudDescriptor.getShardId();
String ourCollection = cloudDescriptor.getCollectionName();
// Some requests may only be fulfilled by replicas of type Replica.Type.NRT
boolean onlyNrtReplicas = Boolean.TRUE == req.getContext().get(ONLY_NRT_REPLICAS);
if (rb.slices.length == 1 && rb.slices[0] != null && // handle the <collection>_<slice> format
(rb.slices[0].equals(ourSlice) || rb.slices[0].equals(ourCollection + "_" + ourSlice)) && cloudDescriptor.getLastPublished() == Replica.State.ACTIVE && (!onlyNrtReplicas || cloudDescriptor.getReplicaType() == Replica.Type.NRT)) {
// currently just a debugging parameter to check distrib search on a single node
boolean shortCircuit = params.getBool("shortCircuit", true);
String targetHandler = params.get(ShardParams.SHARDS_QT);
// if a different handler is specified, don't short-circuit
shortCircuit = shortCircuit && targetHandler == null;
if (shortCircuit) {
rb.isDistrib = false;
rb.shortCircuitedURL = ZkCoreNodeProps.getCoreUrl(zkController.getBaseUrl(), coreDescriptor.getName());
return;
}
// We shouldn't need to do anything to handle "shard.rows" since it was previously meant to be an optimization?
}
for (int i = 0; i < rb.shards.length; i++) {
final List<String> shardUrls;
if (rb.shards[i] != null) {
shardUrls = StrUtils.splitSmart(rb.shards[i], "|", true);
replicaListTransformer.transform(shardUrls);
} else {
if (clusterState == null) {
clusterState = zkController.getClusterState();
slices = clusterState.getSlicesMap(cloudDescriptor.getCollectionName());
}
String sliceName = rb.slices[i];
Slice slice = slices.get(sliceName);
if (slice == null) {
// Treat this the same as "all servers down" for a slice, and let things continue
// if partial results are acceptable
rb.shards[i] = "";
continue;
// throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "no such shard: " + sliceName);
}
final Predicate<Replica> isShardLeader = new Predicate<Replica>() {
private Replica shardLeader = null;
@Override
public boolean test(Replica replica) {
if (shardLeader == null) {
try {
shardLeader = zkController.getZkStateReader().getLeaderRetry(cloudDescriptor.getCollectionName(), slice.getName());
} catch (InterruptedException e) {
throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Exception finding leader for shard " + slice.getName() + " in collection " + cloudDescriptor.getCollectionName(), e);
} catch (SolrException e) {
if (log.isDebugEnabled()) {
log.debug("Exception finding leader for shard {} in collection {}. Collection State: {}", slice.getName(), cloudDescriptor.getCollectionName(), zkController.getZkStateReader().getClusterState().getCollectionOrNull(cloudDescriptor.getCollectionName()));
}
throw e;
}
}
return replica.getName().equals(shardLeader.getName());
}
};
final List<Replica> eligibleSliceReplicas = collectEligibleReplicas(slice, clusterState, onlyNrtReplicas, isShardLeader);
replicaListTransformer.transform(eligibleSliceReplicas);
shardUrls = new ArrayList<>(eligibleSliceReplicas.size());
for (Replica replica : eligibleSliceReplicas) {
String url = ZkCoreNodeProps.getCoreUrl(replica);
shardUrls.add(url);
}
if (shardUrls.isEmpty()) {
boolean tolerant = rb.req.getParams().getBool(ShardParams.SHARDS_TOLERANT, false);
if (!tolerant) {
// stop the check when there are no replicas available for a shard
throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "no servers hosting shard: " + rb.slices[i]);
}
}
}
// And now recreate the | delimited list of equivalent servers
rb.shards[i] = createSliceShardsStr(shardUrls);
}
}
String shards_rows = params.get(ShardParams.SHARDS_ROWS);
if (shards_rows != null) {
rb.shards_rows = Integer.parseInt(shards_rows);
}
String shards_start = params.get(ShardParams.SHARDS_START);
if (shards_start != null) {
rb.shards_start = Integer.parseInt(shards_start);
}
}
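The shards-related parameters read at the top and bottom of prepDistributed can also be set explicitly by a client. A minimal sketch, with made-up host and core names:

import org.apache.solr.client.solrj.SolrQuery;

public class ShardsParamsSketch {
  public static void main(String[] args) {
    SolrQuery q = new SolrQuery("*:*");
    // Explicit shard list (ShardParams.SHARDS). Entries separated by ',' are different
    // shards; '|' separates equivalent replicas of the same shard. In SolrCloud an entry
    // without '/' is treated as a logical slice name by the loop above.
    q.set("shards", "shard1,host1:8983/solr/core1|host2:8983/solr/core1");
    q.set("shards.rows", "20");        // ShardParams.SHARDS_ROWS -> rb.shards_rows
    q.set("shards.start", "0");        // ShardParams.SHARDS_START -> rb.shards_start
    q.set("shards.tolerant", "true");  // ShardParams.SHARDS_TOLERANT: tolerate slices with no live replicas
    System.out.println(q);
  }
}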
Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.
The class MergeIndexesOp, method execute.
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
SolrParams params = it.req.getParams();
String cname = params.required().get(CoreAdminParams.CORE);
SolrCore core = it.handler.coreContainer.getCore(cname);
SolrQueryRequest wrappedReq = null;
List<SolrCore> sourceCores = Lists.newArrayList();
List<RefCounted<SolrIndexSearcher>> searchers = Lists.newArrayList();
// stores readers created from indexDir param values
List<DirectoryReader> readersToBeClosed = Lists.newArrayList();
Map<Directory, Boolean> dirsToBeReleased = new HashMap<>();
if (core != null) {
try {
String[] dirNames = params.getParams(CoreAdminParams.INDEX_DIR);
if (dirNames == null || dirNames.length == 0) {
String[] sources = params.getParams("srcCore");
if (sources == null || sources.length == 0)
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "At least one indexDir or srcCore must be specified");
for (int i = 0; i < sources.length; i++) {
String source = sources[i];
SolrCore srcCore = it.handler.coreContainer.getCore(source);
if (srcCore == null)
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core: " + source + " does not exist");
sourceCores.add(srcCore);
}
} else {
DirectoryFactory dirFactory = core.getDirectoryFactory();
for (int i = 0; i < dirNames.length; i++) {
boolean markAsDone = false;
if (dirFactory instanceof CachingDirectoryFactory) {
if (!((CachingDirectoryFactory) dirFactory).getLivePaths().contains(dirNames[i])) {
markAsDone = true;
}
}
Directory dir = dirFactory.get(dirNames[i], DirectoryFactory.DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType);
dirsToBeReleased.put(dir, markAsDone);
// TODO: why doesn't this use the IR factory? what is going on here?
readersToBeClosed.add(DirectoryReader.open(dir));
}
}
List<DirectoryReader> readers = null;
if (readersToBeClosed.size() > 0) {
readers = readersToBeClosed;
} else {
readers = Lists.newArrayList();
for (SolrCore solrCore : sourceCores) {
// record the searchers so that we can decref
RefCounted<SolrIndexSearcher> searcher = solrCore.getSearcher();
searchers.add(searcher);
readers.add(searcher.get().getIndexReader());
}
}
UpdateRequestProcessorChain processorChain = core.getUpdateProcessingChain(params.get(UpdateParams.UPDATE_CHAIN));
wrappedReq = new LocalSolrQueryRequest(core, it.req.getParams());
UpdateRequestProcessor processor = processorChain.createProcessor(wrappedReq, it.rsp);
processor.processMergeIndexes(new MergeIndexesCommand(readers, it.req));
} catch (Exception e) {
// log and rethrow so that if the finally fails we don't lose the original problem
log.error("ERROR executing merge:", e);
throw e;
} finally {
for (RefCounted<SolrIndexSearcher> searcher : searchers) {
if (searcher != null)
searcher.decref();
}
for (SolrCore solrCore : sourceCores) {
if (solrCore != null)
solrCore.close();
}
IOUtils.closeWhileHandlingException(readersToBeClosed);
Set<Map.Entry<Directory, Boolean>> entries = dirsToBeReleased.entrySet();
for (Map.Entry<Directory, Boolean> entry : entries) {
DirectoryFactory dirFactory = core.getDirectoryFactory();
Directory dir = entry.getKey();
boolean markAsDone = entry.getValue();
if (markAsDone) {
dirFactory.doneWithDirectory(dir);
}
dirFactory.release(dir);
}
if (wrappedReq != null)
wrappedReq.close();
core.close();
}
}
}
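A hedged SolrJ sketch of the CoreAdmin mergeindexes call that ends up in execute() above; the core names are illustrative, and merging from srcCore parameters corresponds to the sourceCores branch in the code.

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;

public class MergeIndexesSketch {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      // Merge the indexes of core1 and core2 into core0. Passing an empty indexDir
      // array means execute() falls through to the srcCore branch shown above.
      CoreAdminRequest.mergeIndexes("core0",
          new String[0],                     // indexDir values (none here)
          new String[] {"core1", "core2"},   // srcCore values
          client);
    }
  }
}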
Use of org.apache.solr.request.SolrQueryRequest in project lucene-solr by apache.
The class SplitOp, method execute.
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
SolrParams params = it.req.getParams();
List<DocRouter.Range> ranges = null;
String[] pathsArr = params.getParams(PATH);
// ranges=a-b,c-d,e-f
String rangesStr = params.get(CoreAdminParams.RANGES);
if (rangesStr != null) {
String[] rangesArr = rangesStr.split(",");
if (rangesArr.length == 0) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index");
} else {
ranges = new ArrayList<>(rangesArr.length);
for (String r : rangesArr) {
try {
ranges.add(DocRouter.DEFAULT.fromString(r));
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception parsing hexadecimal hash range: " + r, e);
}
}
}
}
String splitKey = params.get("split.key");
String[] newCoreNames = params.getParams("targetCore");
String cname = params.get(CoreAdminParams.CORE, "");
if ((pathsArr == null || pathsArr.length == 0) && (newCoreNames == null || newCoreNames.length == 0)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified");
}
log.info("Invoked split action for core: " + cname);
SolrCore core = it.handler.coreContainer.getCore(cname);
SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
List<SolrCore> newCores = null;
try {
// TODO: allow use of rangesStr in the future
List<String> paths = null;
int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length;
DocRouter router = null;
String routeFieldName = null;
if (it.handler.coreContainer.isZooKeeperAware()) {
ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
DocCollection collection = clusterState.getCollection(collectionName);
String sliceName = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
Slice slice = collection.getSlice(sliceName);
router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
if (ranges == null) {
DocRouter.Range currentRange = slice.getRange();
ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null;
}
// for back-compat with Solr 4.4
Object routerObj = collection.get(DOC_ROUTER);
if (routerObj != null && routerObj instanceof Map) {
Map routerProps = (Map) routerObj;
routeFieldName = (String) routerProps.get("field");
}
}
if (pathsArr == null) {
newCores = new ArrayList<>(partitions);
for (String newCoreName : newCoreNames) {
SolrCore newcore = it.handler.coreContainer.getCore(newCoreName);
if (newcore != null) {
newCores.add(newcore);
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " expected but doesn't exist.");
}
}
} else {
paths = Arrays.asList(pathsArr);
}
SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router, routeFieldName, splitKey);
core.getUpdateHandler().split(cmd);
// After the split has completed, someone (here?) should start the process of replaying the buffered updates.
} catch (Exception e) {
log.error("ERROR executing split:", e);
throw new RuntimeException(e);
} finally {
if (req != null)
req.close();
if (core != null)
core.close();
if (newCores != null) {
for (SolrCore newCore : newCores) {
newCore.close();
}
}
}
}
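A hedged sketch of how the corresponding CoreAdmin SPLIT request could be issued through SolrJ's generic request API; the core names and the commented-out hash ranges are illustrative assumptions.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class SplitCoreSketch {
  public static void main(String[] args) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", "SPLIT");
    params.set("core", "core0");                    // CoreAdminParams.CORE
    params.set("targetCore", "core0_0", "core0_1"); // one targetCore per partition
    // params.set("ranges", "0-7fffffff,80000000-ffffffff"); // optional hex hash ranges (CoreAdminParams.RANGES)
    try (HttpSolrClient client =
             new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      client.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/cores", params));
    }
  }
}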