Example usage of org.apache.solr.client.solrj.request.IsUpdateRequest in the Apache lucene-solr project: class CloudSolrClient, method sendRequest.
/**
 * Routes a request to the appropriate node(s) in the cluster and executes it
 * through the load-balancing client.
 *
 * Update requests are first attempted via {@code directUpdate} (document
 * routing); if that returns null, and for all other requests, a URL list is
 * assembled and handed to {@code lbClient}:
 * - V2 requests go to one random live node,
 * - admin-path requests may go to any live node,
 * - everything else goes to active replicas of the target collection(s),
 *   with leaders preferred (tried first) for update requests.
 *
 * @param request    the request to execute
 * @param collection the target collection, or null (required unless the
 *                   request is a V2 or admin request)
 * @return the response from the first server that handled the request
 * @throws SolrServerException if no collection is specified when one is
 *                             required, or no server could handle the request
 * @throws IOException         on low-level I/O failure
 */
protected NamedList<Object> sendRequest(SolrRequest request, String collection) throws SolrServerException, IOException {
  connect();
  boolean sendToLeaders = false;
  List<String> replicas = null;
  if (request instanceof IsUpdateRequest) {
    if (request instanceof UpdateRequest) {
      // Try the routed direct-update path first; a null response means we
      // must fall back to the generic load-balanced path below.
      NamedList<Object> response = directUpdate((AbstractUpdateRequest) request, collection);
      if (response != null) {
        return response;
      }
    }
    // Updates go to leaders first; non-leader replicas are kept as fallbacks.
    sendToLeaders = true;
    replicas = new ArrayList<>();
  }
  SolrParams reqParams = request.getParams();
  if (reqParams == null) {
    reqParams = new ModifiableSolrParams();
  }
  List<String> theUrlList = new ArrayList<>();
  if (request instanceof V2Request) {
    // V2 requests are sent to a single randomly chosen live node.
    Set<String> liveNodes = stateProvider.liveNodes();
    if (!liveNodes.isEmpty()) {
      List<String> liveNodesList = new ArrayList<>(liveNodes);
      Collections.shuffle(liveNodesList, rand);
      theUrlList.add(ZkStateReader.getBaseUrlForNodeName(liveNodesList.get(0), (String) stateProvider.getClusterProperty(ZkStateReader.URL_SCHEME, "http")));
    }
  } else if (ADMIN_PATHS.contains(request.getPath())) {
    // Admin requests can be served by any live node.
    Set<String> liveNodes = stateProvider.liveNodes();
    for (String liveNode : liveNodes) {
      theUrlList.add(ZkStateReader.getBaseUrlForNodeName(liveNode, (String) stateProvider.getClusterProperty(ZkStateReader.URL_SCHEME, "http")));
    }
  } else {
    if (collection == null) {
      throw new SolrServerException("No collection param specified on request and no default collection has been set.");
    }
    Set<String> collectionNames = getCollectionNames(collection);
    if (collectionNames.size() == 0) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "Could not find collection: " + collection);
    }
    String shardKeys = reqParams.get(ShardParams._ROUTE_);
    // TODO: not a big deal because of the caching, but we could avoid looking
    // at every shard when getting leaders if we tweaked some things
    // Retrieve slices from the cloud state and, for each collection
    // specified, add it to the Map of slices.
    Map<String, Slice> slices = new HashMap<>();
    for (String collectionName : collectionNames) {
      DocCollection col = getDocCollection(collectionName, null);
      Collection<Slice> routeSlices = col.getRouter().getSearchSlices(shardKeys, reqParams, col);
      ClientUtils.addSlices(slices, collectionName, routeSlices, true);
    }
    Set<String> liveNodes = stateProvider.liveNodes();
    // build a map of unique nodes
    // TODO: allow filtering by group, role, etc
    Map<String, ZkNodeProps> nodes = new HashMap<>();
    List<String> urlList = new ArrayList<>();
    for (Slice slice : slices.values()) {
      for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) {
        ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
        String node = coreNodeProps.getNodeName();
        // Skip nodes that are down or replicas that are not active.
        if (!liveNodes.contains(node) || Replica.State.getState(coreNodeProps.getState()) != Replica.State.ACTIVE)
          continue;
        if (nodes.put(node, nodeProps) == null) {
          // Compute the replica URL once; the same rule applies whether the
          // replica ends up in the primary (leader) list or the fallback list.
          String url;
          if (reqParams.get(UpdateParams.COLLECTION) == null) {
            url = ZkCoreNodeProps.getCoreUrl(nodeProps.getStr(ZkStateReader.BASE_URL_PROP), collection);
          } else {
            url = coreNodeProps.getCoreUrl();
          }
          if (!sendToLeaders || coreNodeProps.isLeader()) {
            urlList.add(url);
          } else {
            replicas.add(url);
          }
        }
      }
    }
    // Shuffle the primary list; for updates, append shuffled non-leader
    // replicas so they act as fallbacks after all leaders.
    theUrlList = new ArrayList<>(urlList);
    Collections.shuffle(theUrlList, rand);
    if (sendToLeaders) {
      List<String> theReplicas = new ArrayList<>(replicas);
      Collections.shuffle(theReplicas, rand);
      theUrlList.addAll(theReplicas);
    }
    if (theUrlList.isEmpty()) {
      // Our cached cluster state may be stale; drop it so the next attempt
      // re-fetches, then fail this request.
      for (String s : collectionNames) {
        if (s != null)
          collectionStateCache.remove(s);
      }
      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Could not find a healthy node to handle the request.");
    }
  }
  LBHttpSolrClient.Req req = new LBHttpSolrClient.Req(request, theUrlList);
  LBHttpSolrClient.Rsp rsp = lbClient.request(req);
  return rsp.getResponse();
}
Example usage of org.apache.solr.client.solrj.request.IsUpdateRequest in the Apache lucene-solr project: class LBHttpSolrClient, method request.
/**
 * Tries to query a live server from the list provided in Req. Servers in the dead pool are skipped.
 * If a request fails due to an IOException, the server is moved to the dead pool for a certain period of
 * time, or until a test request on that server succeeds.
 *
 * Servers are queried in the exact order given (except servers currently in the dead pool are skipped).
 * If no live servers from the provided list remain to be tried, a number of previously skipped dead servers will be tried.
 * Req.getNumDeadServersToTry() controls how many dead servers will be tried.
 *
 * If no live servers are found a SolrServerException is thrown.
 *
 * @param req contains both the request as well as the list of servers to query
 *
 * @return the result of the request
 *
 * @throws IOException If there is a low-level I/O error.
 * @throws SolrServerException if no server (live or retried-dead) could successfully handle the request
 */
public Rsp request(Req req) throws SolrServerException, IOException {
Rsp rsp = new Rsp();
// Most recent failure; becomes the cause of the final SolrServerException.
Exception ex = null;
// Updates and admin requests must not be retried on another server after a
// failure (they may have partially executed); other requests are retryable.
boolean isNonRetryable = req.request instanceof IsUpdateRequest || ADMIN_PATHS.contains(req.request.getPath());
// Zombie servers skipped in the first pass, retried in the second pass.
List<ServerWrapper> skipped = null;
final Integer numServersToTry = req.getNumServersToTry();
int numServersTried = 0;
boolean timeAllowedExceeded = false;
// Optional timeAllowed parameter bounds total wall-clock time across attempts.
long timeAllowedNano = getTimeAllowedInNanos(req.getRequest());
long timeOutTime = System.nanoTime() + timeAllowedNano;
// First pass: try live servers in the exact order given.
for (String serverStr : req.getServers()) {
if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) {
break;
}
serverStr = normalize(serverStr);
// if the server is currently a zombie, just skip to the next one
ServerWrapper wrapper = zombieServers.get(serverStr);
if (wrapper != null) {
// System.out.println("ZOMBIE SERVER QUERIED: " + serverStr);
// Record up to numDeadServersToTry zombies for the second pass below.
final int numDeadServersToTry = req.getNumDeadServersToTry();
if (numDeadServersToTry > 0) {
if (skipped == null) {
skipped = new ArrayList<>(numDeadServersToTry);
skipped.add(wrapper);
} else if (skipped.size() < numDeadServersToTry) {
skipped.add(wrapper);
}
}
continue;
}
try {
// MDC tag lets log lines be correlated with the server being tried.
MDC.put("LBHttpSolrClient.url", serverStr);
// NOTE(review): the counter is checked with '>' before being incremented,
// which permits numServersToTry + 1 attempts — confirm this off-by-one is intended.
if (numServersToTry != null && numServersTried > numServersToTry.intValue()) {
break;
}
HttpSolrClient client = makeSolrClient(serverStr);
++numServersTried;
// doRequest returns null on success, or the failure cause otherwise.
ex = doRequest(client, req, rsp, isNonRetryable, false, null);
if (ex == null) {
// SUCCESS
return rsp;
}
} finally {
MDC.remove("LBHttpSolrClient.url");
}
}
// try the servers we previously skipped
// Second pass: retry recorded zombies, subject to the same time and
// attempt budgets as the first pass.
if (skipped != null) {
for (ServerWrapper wrapper : skipped) {
if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) {
break;
}
if (numServersToTry != null && numServersTried > numServersToTry.intValue()) {
break;
}
try {
MDC.put("LBHttpSolrClient.url", wrapper.client.getBaseURL());
++numServersTried;
// 'true' marks this as a zombie retry so a success can revive the server.
ex = doRequest(wrapper.client, req, rsp, isNonRetryable, true, wrapper.getKey());
if (ex == null) {
// SUCCESS
return rsp;
}
} finally {
MDC.remove("LBHttpSolrClient.url");
}
}
}
// All attempts failed (or budgets ran out): build a message explaining why.
final String solrServerExceptionMessage;
if (timeAllowedExceeded) {
solrServerExceptionMessage = "Time allowed to handle this request exceeded";
} else {
if (numServersToTry != null && numServersTried > numServersToTry.intValue()) {
solrServerExceptionMessage = "No live SolrServers available to handle this request:" + " numServersTried=" + numServersTried + " numServersToTry=" + numServersToTry.intValue();
} else {
solrServerExceptionMessage = "No live SolrServers available to handle this request";
}
}
if (ex == null) {
throw new SolrServerException(solrServerExceptionMessage);
} else {
throw new SolrServerException(solrServerExceptionMessage + ":" + zombieServers.keySet(), ex);
}
}
End of aggregated usage examples.