use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
the class ShardSplitTest method getHashRangeIdx.
// Returns the index of the range in "ranges" that contains the hash of "id",
// or -1 if no range contains it (the hash stays 0 for non-hash-based routers).
public static int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, String id) {
  int hash = 0;
  if (router instanceof HashBasedRouter) {
    HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
    hash = hashBasedRouter.sliceHash(id, null, null, null);
  }
  for (int i = 0; i < ranges.size(); i++) {
    DocRouter.Range range = ranges.get(i);
    if (range.includes(hash))
      return i;
  }
  return -1;
}
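For reference, a minimal sketch of how this helper could be exercised on its own. It assumes DocRouter.DEFAULT (the hash-based CompositeIdRouter), that DocRouter exposes fullRange() in addition to the partitionRange() used elsewhere on this page, and that getHashRangeIdx is reachable as shown; the class name and document id are illustrative only.
import java.util.List;

import org.apache.solr.common.cloud.DocRouter;

public class HashRangeIdxSketch {
  public static void main(String[] args) {
    // DocRouter.DEFAULT is hash based, so sliceHash() applies and the helper
    // can locate the matching range; for a non-hash router the hash stays 0.
    DocRouter router = DocRouter.DEFAULT;

    // Split the full 32-bit hash space into 4 equal sub-ranges, much like a
    // shard split would, then ask which sub-range a document id falls into.
    List<DocRouter.Range> ranges = router.partitionRange(4, router.fullRange());
    int idx = ShardSplitTest.getHashRangeIdx(router, ranges, "doc-42");
    System.out.println("doc-42 hashes into range #" + idx + " = " + ranges.get(idx));
  }
}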
use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
the class AssignTest method testAssignNode.
@Test
public void testAssignNode() throws Exception {
  String cname = "collection1";
  Map<String, DocCollection> collectionStates = new HashMap<>();
  Map<String, Slice> slices = new HashMap<>();
  Map<String, Replica> replicas = new HashMap<>();
  ZkNodeProps m = new ZkNodeProps(
      Overseer.QUEUE_OPERATION, "state",
      ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString(),
      ZkStateReader.BASE_URL_PROP, "0.0.0.0",
      ZkStateReader.CORE_NAME_PROP, "core1",
      ZkStateReader.ROLES_PROP, null,
      ZkStateReader.NODE_NAME_PROP, "0_0_0_0",
      ZkStateReader.SHARD_ID_PROP, "shard1",
      ZkStateReader.COLLECTION_PROP, cname,
      ZkStateReader.NUM_SHARDS_PROP, "1",
      ZkStateReader.CORE_NODE_NAME_PROP, "core_node1");
  Replica replica = new Replica("core_node1", m.getProperties());
  replicas.put("core_node1", replica);
  Slice slice = new Slice("slice1", replicas, new HashMap<String, Object>(0));
  slices.put("slice1", slice);
  DocRouter router = new ImplicitDocRouter();
  DocCollection docCollection = new DocCollection(cname, slices, new HashMap<String, Object>(0), router);
  collectionStates.put(cname, docCollection);
  Set<String> liveNodes = new HashSet<>();
  ClusterState state = new ClusterState(-1, liveNodes, collectionStates);
  String nodeName = Assign.assignNode(state.getCollection("collection1"));
  assertEquals("core_node2", nodeName);
}
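As a contrast to hash-based routing, here is a small hedged sketch of what the ImplicitDocRouter used in this fixture does: it never hashes the document id and instead resolves the slice from an explicitly supplied route. The constructor calls mirror the test above; the getTargetSlice call, and the assumption that an explicit route argument wins, are not taken from this page.
import java.util.HashMap;
import java.util.Map;

import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.ImplicitDocRouter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;

public class ImplicitRoutingSketch {
  public static void main(String[] args) {
    // Build a one-slice collection the same way testAssignNode does.
    Map<String, Replica> replicas = new HashMap<>();
    Map<String, Slice> slices = new HashMap<>();
    slices.put("shard1", new Slice("shard1", replicas, new HashMap<String, Object>(0)));

    DocRouter router = new ImplicitDocRouter();
    DocCollection coll = new DocCollection("collection1", slices, new HashMap<String, Object>(0), router);

    // The implicit router ignores the document id; the caller names the shard.
    // (Assumed behavior: an explicit route argument takes precedence.)
    Slice target = router.getTargetSlice("doc1", null, "shard1", null, coll);
    System.out.println("routed to: " + target.getName()); // expected: shard1
  }
}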
use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
the class SplitOp method execute.
@Override
public void execute(CoreAdminHandler.CallInfo it) throws Exception {
  SolrParams params = it.req.getParams();
  List<DocRouter.Range> ranges = null;
  String[] pathsArr = params.getParams(PATH);
  // ranges=a-b,c-d,e-f
  String rangesStr = params.get(CoreAdminParams.RANGES);
  if (rangesStr != null) {
    String[] rangesArr = rangesStr.split(",");
    if (rangesArr.length == 0) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index");
    } else {
      ranges = new ArrayList<>(rangesArr.length);
      for (String r : rangesArr) {
        try {
          ranges.add(DocRouter.DEFAULT.fromString(r));
        } catch (Exception e) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception parsing hexadecimal hash range: " + r, e);
        }
      }
    }
  }
  String splitKey = params.get("split.key");
  String[] newCoreNames = params.getParams("targetCore");
  String cname = params.get(CoreAdminParams.CORE, "");
  if ((pathsArr == null || pathsArr.length == 0) && (newCoreNames == null || newCoreNames.length == 0)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified");
  }
  log.info("Invoked split action for core: " + cname);
  SolrCore core = it.handler.coreContainer.getCore(cname);
  SolrQueryRequest req = new LocalSolrQueryRequest(core, params);
  List<SolrCore> newCores = null;
  try {
    // TODO: allow use of rangesStr in the future
    List<String> paths = null;
    int partitions = pathsArr != null ? pathsArr.length : newCoreNames.length;
    DocRouter router = null;
    String routeFieldName = null;
    if (it.handler.coreContainer.isZooKeeperAware()) {
      ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
      String collectionName = req.getCore().getCoreDescriptor().getCloudDescriptor().getCollectionName();
      DocCollection collection = clusterState.getCollection(collectionName);
      String sliceName = req.getCore().getCoreDescriptor().getCloudDescriptor().getShardId();
      Slice slice = collection.getSlice(sliceName);
      router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
      if (ranges == null) {
        DocRouter.Range currentRange = slice.getRange();
        ranges = currentRange != null ? router.partitionRange(partitions, currentRange) : null;
      }
      // for back-compat with Solr 4.4
      Object routerObj = collection.get(DOC_ROUTER);
      if (routerObj != null && routerObj instanceof Map) {
        Map routerProps = (Map) routerObj;
        routeFieldName = (String) routerProps.get("field");
      }
    }
    if (pathsArr == null) {
      newCores = new ArrayList<>(partitions);
      for (String newCoreName : newCoreNames) {
        SolrCore newcore = it.handler.coreContainer.getCore(newCoreName);
        if (newcore != null) {
          newCores.add(newcore);
        } else {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Core with core name " + newCoreName + " expected but doesn't exist.");
        }
      }
    } else {
      paths = Arrays.asList(pathsArr);
    }
    SplitIndexCommand cmd = new SplitIndexCommand(req, paths, newCores, ranges, router, routeFieldName, splitKey);
    core.getUpdateHandler().split(cmd);
    // After the split has completed, someone (here?) should start the process of replaying the buffered updates.
  } catch (Exception e) {
    log.error("ERROR executing split:", e);
    throw new RuntimeException(e);
  } finally {
    if (req != null)
      req.close();
    if (core != null)
      core.close();
    if (newCores != null) {
      for (SolrCore newCore : newCores) {
        newCore.close();
      }
    }
  }
}
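A short sketch of the two ways "ranges" gets populated above: parsing the hexadecimal ranges parameter with DocRouter.DEFAULT.fromString, or falling back to partitionRange when no explicit ranges were supplied. fullRange() stands in for the slice's current range here and is used only for illustration; the class name is hypothetical.
import java.util.ArrayList;
import java.util.List;

import org.apache.solr.common.cloud.DocRouter;

public class SplitRangesSketch {
  public static void main(String[] args) {
    // Explicit form: ranges=0-7fffffff,80000000-ffffffff (hexadecimal hash ranges).
    String rangesStr = "0-7fffffff,80000000-ffffffff";
    List<DocRouter.Range> ranges = new ArrayList<>();
    for (String r : rangesStr.split(",")) {
      ranges.add(DocRouter.DEFAULT.fromString(r));
    }
    System.out.println("parsed ranges: " + ranges);

    // Fallback form: split the slice's current range into equal partitions.
    // (fullRange() is only a stand-in for slice.getRange() in this sketch.)
    DocRouter.Range current = DocRouter.DEFAULT.fullRange();
    System.out.println("2-way split of " + current + ": " + DocRouter.DEFAULT.partitionRange(2, current));
  }
}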
use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
the class CloudSolrClient method directUpdate.
private NamedList<Object> directUpdate(AbstractUpdateRequest request, String collection) throws SolrServerException {
  UpdateRequest updateRequest = (UpdateRequest) request;
  ModifiableSolrParams params = (ModifiableSolrParams) request.getParams();
  ModifiableSolrParams routableParams = new ModifiableSolrParams();
  ModifiableSolrParams nonRoutableParams = new ModifiableSolrParams();
  if (params != null) {
    nonRoutableParams.add(params);
    routableParams.add(params);
    for (String param : NON_ROUTABLE_PARAMS) {
      routableParams.remove(param);
    }
  }
  if (collection == null) {
    throw new SolrServerException("No collection param specified on request and no default collection has been set.");
  }
  // Check to see if the collection is an alias.
  collection = stateProvider.getCollectionName(collection);
  DocCollection col = getDocCollection(collection, null);
  DocRouter router = col.getRouter();
  if (router instanceof ImplicitDocRouter) {
    // short circuit as optimization
    return null;
  }
  // Create the URL map, which is keyed on slice name.
  // The value is a list of URLs for each replica in the slice.
  // The first value in the list is the leader for the slice.
  final Map<String, List<String>> urlMap = buildUrlMap(col);
  final Map<String, LBHttpSolrClient.Req> routes = (urlMap == null ? null : updateRequest.getRoutes(router, col, urlMap, routableParams, this.idField));
  if (routes == null) {
    if (directUpdatesToLeadersOnly && hasInfoToFindLeaders(updateRequest, idField)) {
      // we have the info needed to find the leaders, but we could not find (all of) them
      throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "directUpdatesToLeadersOnly==true but could not find leader(s)");
    } else {
      // we could not find a leader or routes yet - use unoptimized general path
      return null;
    }
  }
  final NamedList<Throwable> exceptions = new NamedList<>();
  // +1 for deleteQuery
  final NamedList<NamedList> shardResponses = new NamedList<>(routes.size() + 1);
  long start = System.nanoTime();
  if (parallelUpdates) {
    final Map<String, Future<NamedList<?>>> responseFutures = new HashMap<>(routes.size());
    for (final Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
      final String url = entry.getKey();
      final LBHttpSolrClient.Req lbRequest = entry.getValue();
      try {
        MDC.put("CloudSolrClient.url", url);
        responseFutures.put(url, threadPool.submit(() -> lbClient.request(lbRequest).getResponse()));
      } finally {
        MDC.remove("CloudSolrClient.url");
      }
    }
    for (final Map.Entry<String, Future<NamedList<?>>> entry : responseFutures.entrySet()) {
      final String url = entry.getKey();
      final Future<NamedList<?>> responseFuture = entry.getValue();
      try {
        shardResponses.add(url, responseFuture.get());
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      } catch (ExecutionException e) {
        exceptions.add(url, e.getCause());
      }
    }
    if (exceptions.size() > 0) {
      Throwable firstException = exceptions.getVal(0);
      if (firstException instanceof SolrException) {
        SolrException e = (SolrException) firstException;
        throw new RouteException(ErrorCode.getErrorCode(e.code()), exceptions, routes);
      } else {
        throw new RouteException(ErrorCode.SERVER_ERROR, exceptions, routes);
      }
    }
  } else {
    for (Map.Entry<String, LBHttpSolrClient.Req> entry : routes.entrySet()) {
      String url = entry.getKey();
      LBHttpSolrClient.Req lbRequest = entry.getValue();
      try {
        NamedList<Object> rsp = lbClient.request(lbRequest).getResponse();
        shardResponses.add(url, rsp);
      } catch (Exception e) {
        if (e instanceof SolrException) {
          throw (SolrException) e;
        } else {
          throw new SolrServerException(e);
        }
      }
    }
  }
  UpdateRequest nonRoutableRequest = null;
  List<String> deleteQuery = updateRequest.getDeleteQuery();
  if (deleteQuery != null && deleteQuery.size() > 0) {
    UpdateRequest deleteQueryRequest = new UpdateRequest();
    deleteQueryRequest.setDeleteQuery(deleteQuery);
    nonRoutableRequest = deleteQueryRequest;
  }
  Set<String> paramNames = nonRoutableParams.getParameterNames();
  Set<String> intersection = new HashSet<>(paramNames);
  intersection.retainAll(NON_ROUTABLE_PARAMS);
  if (nonRoutableRequest != null || intersection.size() > 0) {
    if (nonRoutableRequest == null) {
      nonRoutableRequest = new UpdateRequest();
    }
    nonRoutableRequest.setParams(nonRoutableParams);
    List<String> urlList = new ArrayList<>();
    urlList.addAll(routes.keySet());
    Collections.shuffle(urlList, rand);
    LBHttpSolrClient.Req req = new LBHttpSolrClient.Req(nonRoutableRequest, urlList);
    try {
      LBHttpSolrClient.Rsp rsp = lbClient.request(req);
      shardResponses.add(urlList.get(0), rsp.getResponse());
    } catch (Exception e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, urlList.get(0), e);
    }
  }
  long end = System.nanoTime();
  RouteResponse rr = condenseResponse(shardResponses, (int) TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS));
  rr.setRouteResponses(shardResponses);
  rr.setRoutes(routes);
  return rr;
}
use of org.apache.solr.common.cloud.DocRouter in project lucene-solr by apache.
the class CloudSolrClientTest method testRouting.
@Test
public void testRouting() throws Exception {
  AbstractUpdateRequest request = new UpdateRequest().add(id, "0", "a_t", "hello1").add(id, "2", "a_t", "hello2").setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
  // Test single threaded routed updates for UpdateRequest
  NamedList<Object> response = getRandomClient().request(request, COLLECTION);
  if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
    checkSingleServer(response);
  }
  CloudSolrClient.RouteResponse rr = (CloudSolrClient.RouteResponse) response;
  Map<String, LBHttpSolrClient.Req> routes = rr.getRoutes();
  Iterator<Map.Entry<String, LBHttpSolrClient.Req>> it = routes.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
    String url = entry.getKey();
    UpdateRequest updateRequest = (UpdateRequest) entry.getValue().getRequest();
    SolrInputDocument doc = updateRequest.getDocuments().get(0);
    String id = doc.getField("id").getValue().toString();
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("q", "id:" + id);
    params.add("distrib", "false");
    QueryRequest queryRequest = new QueryRequest(params);
    try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
      QueryResponse queryResponse = queryRequest.process(solrClient);
      SolrDocumentList docList = queryResponse.getResults();
      assertTrue(docList.getNumFound() == 1);
    }
  }
  // Test the deleteById routing for UpdateRequest
  final UpdateResponse uResponse = new UpdateRequest().deleteById("0").deleteById("2").commit(cluster.getSolrClient(), COLLECTION);
  if (getRandomClient().isDirectUpdatesToLeadersOnly()) {
    checkSingleServer(uResponse.getResponse());
  }
  QueryResponse qResponse = getRandomClient().query(COLLECTION, new SolrQuery("*:*"));
  SolrDocumentList docs = qResponse.getResults();
  assertEquals(0, docs.getNumFound());
  // Test multi-threaded routed updates for UpdateRequest
  try (CloudSolrClient threadedClient = getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
    threadedClient.setParallelUpdates(true);
    threadedClient.setDefaultCollection(COLLECTION);
    response = threadedClient.request(request);
    if (threadedClient.isDirectUpdatesToLeadersOnly()) {
      checkSingleServer(response);
    }
    rr = (CloudSolrClient.RouteResponse) response;
    routes = rr.getRoutes();
    it = routes.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, LBHttpSolrClient.Req> entry = it.next();
      String url = entry.getKey();
      UpdateRequest updateRequest = (UpdateRequest) entry.getValue().getRequest();
      SolrInputDocument doc = updateRequest.getDocuments().get(0);
      String id = doc.getField("id").getValue().toString();
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.add("q", "id:" + id);
      params.add("distrib", "false");
      QueryRequest queryRequest = new QueryRequest(params);
      try (HttpSolrClient solrClient = getHttpSolrClient(url)) {
        QueryResponse queryResponse = queryRequest.process(solrClient);
        SolrDocumentList docList = queryResponse.getResults();
        assertTrue(docList.getNumFound() == 1);
      }
    }
  }
  // Test that queries with _route_ params are routed by the client
  // Track request counts on each node before the query calls
  ClusterState clusterState = cluster.getSolrClient().getZkStateReader().getClusterState();
  DocCollection col = clusterState.getCollection(COLLECTION);
  Map<String, Long> requestCountsMap = Maps.newHashMap();
  for (Slice slice : col.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      requestCountsMap.put(baseURL, getNumRequests(baseURL, COLLECTION));
    }
  }
  // Collect the base URLs of the replicas of the shard that's expected to be hit
  DocRouter router = col.getRouter();
  Collection<Slice> expectedSlices = router.getSearchSlicesSingle("0", null, col);
  Set<String> expectedBaseURLs = Sets.newHashSet();
  for (Slice expectedSlice : expectedSlices) {
    for (Replica replica : expectedSlice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      expectedBaseURLs.add(baseURL);
    }
  }
  assertTrue("expected urls is not fewer than all urls! expected=" + expectedBaseURLs + "; all=" + requestCountsMap.keySet(), expectedBaseURLs.size() < requestCountsMap.size());
  // Calculate a number of shard keys that route to the same shard.
  int n;
  if (TEST_NIGHTLY) {
    n = random().nextInt(999) + 2;
  } else {
    n = random().nextInt(9) + 2;
  }
  List<String> sameShardRoutes = Lists.newArrayList();
  sameShardRoutes.add("0");
  for (int i = 1; i < n; i++) {
    String shardKey = Integer.toString(i);
    Collection<Slice> slices = router.getSearchSlicesSingle(shardKey, null, col);
    log.info("Expected Slices {}", slices);
    if (expectedSlices.equals(slices)) {
      sameShardRoutes.add(shardKey);
    }
  }
  assertTrue(sameShardRoutes.size() > 1);
  // Do n queries with the _route_ parameter to the same shard
  for (int i = 0; i < n; i++) {
    ModifiableSolrParams solrParams = new ModifiableSolrParams();
    solrParams.set(CommonParams.Q, "*:*");
    solrParams.set(ShardParams._ROUTE_, sameShardRoutes.get(random().nextInt(sameShardRoutes.size())));
    log.info("output: {}", getRandomClient().query(COLLECTION, solrParams));
  }
  // The request counts on the expected nodes should increase by n in total,
  // while there should be no increase on unexpected nodes.
  int increaseFromExpectedUrls = 0;
  int increaseFromUnexpectedUrls = 0;
  Map<String, Long> numRequestsToUnexpectedUrls = Maps.newHashMap();
  for (Slice slice : col.getSlices()) {
    for (Replica replica : slice.getReplicas()) {
      String baseURL = (String) replica.get(ZkStateReader.BASE_URL_PROP);
      Long prevNumRequests = requestCountsMap.get(baseURL);
      Long curNumRequests = getNumRequests(baseURL, COLLECTION);
      long delta = curNumRequests - prevNumRequests;
      if (expectedBaseURLs.contains(baseURL)) {
        increaseFromExpectedUrls += delta;
      } else {
        increaseFromUnexpectedUrls += delta;
        numRequestsToUnexpectedUrls.put(baseURL, delta);
      }
    }
  }
  assertEquals("Unexpected number of requests to expected URLs", n, increaseFromExpectedUrls);
  assertEquals("Unexpected number of requests to unexpected URLs: " + numRequestsToUnexpectedUrls, 0, increaseFromUnexpectedUrls);
}
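The _route_ behavior this test measures comes from composite-id hashing: ids that share a route key prefix hash into the sub-range reserved for that key, so a query with _route_=a! only needs the slices covering that range. A hedged sketch follows; keyHashRange() on CompositeIdRouter is an assumption not taken from this page, while sliceHash() and Range.includes() appear in the snippets above.
import org.apache.solr.common.cloud.CompositeIdRouter;
import org.apache.solr.common.cloud.DocRouter;

public class RouteKeySketch {
  public static void main(String[] args) {
    // DocRouter.DEFAULT is a CompositeIdRouter.
    CompositeIdRouter router = (CompositeIdRouter) DocRouter.DEFAULT;

    // The hash range reserved for the "a!" route key (assumed API).
    DocRouter.Range keyRange = router.keyHashRange("a!");

    // Ids carrying the same prefix hash into that range, which is why a
    // _route_=a! query can be restricted to the slices covering it.
    int hash1 = router.sliceHash("a!doc1", null, null, null);
    int hash2 = router.sliceHash("a!doc2", null, null, null);
    System.out.println("a! maps to " + keyRange);
    System.out.println("a!doc1 inside? " + keyRange.includes(hash1)); // expected: true
    System.out.println("a!doc2 inside? " + keyRange.includes(hash2)); // expected: true
  }
}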