Use of org.apache.storm.shade.com.google.common.annotations.VisibleForTesting in project storm by apache.
The class FileBlobStoreImpl, method getKeyDir.
@VisibleForTesting
File getKeyDir(String key) {
    // Spread keys across BUCKETS subdirectories of fullPath; casting to long
    // before Math.abs avoids the Integer.MIN_VALUE overflow of Math.abs(int).
    String hash = String.valueOf(Math.abs((long) key.hashCode()) % BUCKETS);
    File ret = new File(new File(fullPath, hash), key);
    LOG.debug("{} Looking for {} in {}", fullPath, key, hash);
    return ret;
}
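Because getKeyDir is package-private and marked @VisibleForTesting, a test in the same package can check the bucketing directly. A minimal sketch, assuming a FileBlobStoreImpl constructor that takes the base directory; the test class name and key are made up for illustration:

import java.io.File;
import java.nio.file.Files;
import org.junit.Test;
import static org.junit.Assert.assertEquals;

public class FileBlobStoreImplBucketTest {
    @Test
    public void keyMapsToStableBucketDir() throws Exception {
        File base = Files.createTempDirectory("blobstore").toFile();
        FileBlobStoreImpl store = new FileBlobStoreImpl(base);
        // The bucket is derived from key.hashCode(), so repeated lookups agree.
        assertEquals(store.getKeyDir("topology-1-stormconf.ser"),
                     store.getKeyDir("topology-1-stormconf.ser"));
        // The key itself becomes the leaf directory name under the bucket.
        assertEquals("topology-1-stormconf.ser",
                     store.getKeyDir("topology-1-stormconf.ser").getName());
    }
}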
Use of org.apache.storm.shade.com.google.common.annotations.VisibleForTesting in project storm by apache.
The class ConstraintSolverStrategy, method validateSolution.
/**
 * Determines if a scheduling is valid and all constraints are satisfied (for use in testing).
 * This is done in three steps.
 *
 * <ol>
 * <li>Check if nodeCoLocationCnt constraints are satisfied. Some components may allow only a certain number of
 * executors to exist on the same node {@link ConstraintSolverConfig#getMaxNodeCoLocationCnts()}.</li>
 *
 * <li>Check if incompatibility constraints are satisfied. Incompatible components
 * {@link ConstraintSolverConfig#getIncompatibleComponentSets()} should not be put on the same worker.</li>
 *
 * <li>Check if CPU and Memory resources do not exceed availability on the node, and that the totals match
 * what is expected when fully scheduled.</li>
 * </ol>
 *
 * @param cluster the cluster on which scheduling was done.
 * @param topo the TopologyDetails being scheduled.
 * @return true if the solution is valid, false otherwise.
 */
@VisibleForTesting
public static boolean validateSolution(Cluster cluster, TopologyDetails topo) {
    assert (cluster.getAssignmentById(topo.getId()) != null);
    LOG.debug("Checking for a valid scheduling for topology {}...", topo.getName());
    ConstraintSolverConfig constraintSolverConfig = new ConstraintSolverConfig(topo);

    // First check NodeCoLocationCnt constraints
    Map<ExecutorDetails, String> execToComp = topo.getExecutorToComponent();
    // Per-node, per-component executor counts; this is the critical count.
    Map<String, Map<String, Integer>> nodeCompMap = new HashMap<>();
    Map<WorkerSlot, RasNode> workerToNodes = new HashMap<>();
    RasNodes.getAllNodesFrom(cluster)
            .values()
            .forEach(node -> node.getUsedSlots().forEach(workerSlot -> workerToNodes.put(workerSlot, node)));
    List<String> errors = new ArrayList<>();
    for (Map.Entry<ExecutorDetails, WorkerSlot> entry : cluster.getAssignmentById(topo.getId()).getExecutorToSlot().entrySet()) {
        ExecutorDetails exec = entry.getKey();
        String comp = execToComp.get(exec);
        WorkerSlot worker = entry.getValue();
        RasNode node = workerToNodes.get(worker);
        String nodeId = node.getId();
        if (!constraintSolverConfig.getMaxNodeCoLocationCnts().containsKey(comp)) {
            continue;
        }
        int allowedColocationMaxCnt = constraintSolverConfig.getMaxNodeCoLocationCnts().get(comp);
        Map<String, Integer> oneNodeCompMap = nodeCompMap.computeIfAbsent(nodeId, (k) -> new HashMap<>());
        oneNodeCompMap.put(comp, oneNodeCompMap.getOrDefault(comp, 0) + 1);
        if (allowedColocationMaxCnt < oneNodeCompMap.get(comp)) {
            String err = String.format("MaxNodeCoLocation: Component %s (exec=%s) on node %s, cnt %d > allowed %d",
                    comp, exec, nodeId, oneNodeCompMap.get(comp), allowedColocationMaxCnt);
            errors.add(err);
        }
    }

    // Second check IncompatibleComponent constraints
    Map<WorkerSlot, Set<String>> workerCompMap = new HashMap<>();
    cluster.getAssignmentById(topo.getId()).getExecutorToSlot().forEach((exec, worker) -> {
        String comp = execToComp.get(exec);
        workerCompMap.computeIfAbsent(worker, (k) -> new HashSet<>()).add(comp);
    });
    for (Map.Entry<WorkerSlot, Set<String>> entry : workerCompMap.entrySet()) {
        Set<String> comps = entry.getValue();
        for (String comp1 : comps) {
            for (String comp2 : comps) {
                if (!comp1.equals(comp2)
                        && constraintSolverConfig.getIncompatibleComponentSets().containsKey(comp1)
                        && constraintSolverConfig.getIncompatibleComponentSets().get(comp1).contains(comp2)) {
                    String err = String.format("IncompatibleComponents: %s and %s on WorkerSlot: %s",
                            comp1, comp2, entry.getKey());
                    errors.add(err);
                }
            }
        }
    }

    // Third check resources
    SchedulerAssignment schedulerAssignment = cluster.getAssignmentById(topo.getId());
    Map<ExecutorDetails, WorkerSlot> execToWorker = new HashMap<>();
    if (schedulerAssignment.getExecutorToSlot() != null) {
        execToWorker.putAll(schedulerAssignment.getExecutorToSlot());
    }
    Map<String, RasNode> nodes = RasNodes.getAllNodesFrom(cluster);
    Map<RasNode, Collection<ExecutorDetails>> nodeToExecs = new HashMap<>();
    for (Map.Entry<ExecutorDetails, WorkerSlot> entry : execToWorker.entrySet()) {
        ExecutorDetails exec = entry.getKey();
        WorkerSlot worker = entry.getValue();
        RasNode node = nodes.get(worker.getNodeId());
        if (node.getAvailableMemoryResources() < 0.0) {
            String err = String.format("Resource Exhausted: Found node %s with negative available memory %,.2f",
                    node.getId(), node.getAvailableMemoryResources());
            errors.add(err);
            continue;
        }
        if (node.getAvailableCpuResources() < 0.0) {
            String err = String.format("Resource Exhausted: Found node %s with negative available CPU %,.2f",
                    node.getId(), node.getAvailableCpuResources());
            errors.add(err);
            continue;
        }
        nodeToExecs.computeIfAbsent(node, (k) -> new HashSet<>()).add(exec);
    }
    for (Map.Entry<RasNode, Collection<ExecutorDetails>> entry : nodeToExecs.entrySet()) {
        RasNode node = entry.getKey();
        Collection<ExecutorDetails> execs = entry.getValue();
        double cpuUsed = 0.0;
        double memoryUsed = 0.0;
        for (ExecutorDetails exec : execs) {
            cpuUsed += topo.getTotalCpuReqTask(exec);
            memoryUsed += topo.getTotalMemReqTask(exec);
        }
        // Available resources are expected to match the accounting exactly,
        // so the exact double comparison below is intentional.
        if (node.getAvailableCpuResources() != (node.getTotalCpuResources() - cpuUsed)) {
            String err = String.format("Incorrect CPU Resources: Node %s CPU available is %,.2f, expected %,.2f, "
                            + "Executors scheduled on node: %s",
                    node.getId(), node.getAvailableCpuResources(), (node.getTotalCpuResources() - cpuUsed), execs);
            errors.add(err);
        }
        if (node.getAvailableMemoryResources() != (node.getTotalMemoryResources() - memoryUsed)) {
            String err = String.format("Incorrect Memory Resources: Node %s Memory available is %,.2f, expected %,.2f, "
                            + "Executors scheduled on node: %s",
                    node.getId(), node.getAvailableMemoryResources(), (node.getTotalMemoryResources() - memoryUsed), execs);
            errors.add(err);
        }
    }
    if (!errors.isEmpty()) {
        LOG.error("Topology {} solution is invalid\n\t{}", topo.getName(), String.join("\n\t", errors));
    }
    return errors.isEmpty();
}
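A typical test-side use is to run the strategy, then assert on this validator. A hedged sketch: buildTestCluster() and buildTestTopology() are hypothetical fixture helpers, and the strategy is assumed to be driven through the usual IStrategy prepare/schedule calls:

// Hypothetical fixtures; only validateSolution is the method shown above.
Cluster cluster = buildTestCluster();
TopologyDetails topo = buildTestTopology();
ConstraintSolverStrategy strategy = new ConstraintSolverStrategy();
strategy.prepare(new HashMap<>());
strategy.schedule(cluster, topo);
assertTrue("constraints must hold after scheduling",
        ConstraintSolverStrategy.validateSolution(cluster, topo));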
Use of org.apache.storm.shade.com.google.common.annotations.VisibleForTesting in project storm by apache.
The class Nimbus, method rmTopologyKeys.
@VisibleForTesting
public void rmTopologyKeys(String topoId) {
    BlobStore store = blobStore;
    IStormClusterState state = stormClusterState;
    try {
        topoCache.deleteTopoConf(topoId, NIMBUS_SUBJECT);
    } catch (Exception e) {
        // Just go on and try to delete the others
    }
    try {
        topoCache.deleteTopology(topoId, NIMBUS_SUBJECT);
    } catch (Exception e) {
        // Just go on and try to delete the others
    }
    rmBlobKey(store, ConfigUtils.masterStormJarKey(topoId), state);
}
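Since each failed deletion is swallowed, a test can force one step to fail and confirm the rest still run. A Mockito-flavored sketch; how the Nimbus under test gets wired with this mocked TopoCache is elided and assumed here:

// Hypothetical wiring: nimbus was constructed with the mocked topoCache.
TopoCache topoCache = mock(TopoCache.class);
doThrow(new RuntimeException("boom")).when(topoCache).deleteTopoConf(eq("topo-1"), any());
nimbus.rmTopologyKeys("topo-1");
// deleteTopology is still attempted even though deleteTopoConf threw.
verify(topoCache).deleteTopology(eq("topo-1"), any());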
Use of org.apache.storm.shade.com.google.common.annotations.VisibleForTesting in project storm by apache.
The class Nimbus, method doCleanup.
@VisibleForTesting
public void doCleanup() throws Exception {
    if (!isLeader()) {
        LOG.info("not a leader, skipping cleanup");
        return;
    }
    IStormClusterState state = stormClusterState;
    Set<String> toClean;
    // Compute the set of topologies to clean under submitLock so it cannot
    // race with a concurrent topology submission.
    synchronized (submitLock) {
        toClean = topoIdsToClean(state, blobStore, this.conf);
    }
    if (toClean != null) {
        for (String topoId : toClean) {
            LOG.info("Cleaning up {}", topoId);
            state.teardownHeartbeats(topoId);
            state.teardownTopologyErrors(topoId);
            state.removeAllPrivateWorkerKeys(topoId);
            state.removeBackpressure(topoId);
            rmDependencyJarsInTopology(topoId);
            forceDeleteTopoDistDir(topoId);
            rmTopologyKeys(topoId);
            heartbeatsCache.removeTopo(topoId);
            idToExecutors.getAndUpdate(new Dissoc<>(topoId));
        }
    }
}
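In production Nimbus invokes doCleanup from a recurring timer rather than on demand. The sketch below shows that pattern with a plain ScheduledExecutorService; the ten-second period and the driver class are illustrative only, as Storm's real wiring uses its own timer and configured frequencies:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative driver, not Storm's actual scheduling code.
public class NimbusCleanupDriver {
    public static void start(Nimbus nimbus) {
        ScheduledExecutorService cleaner = Executors.newSingleThreadScheduledExecutor();
        cleaner.scheduleWithFixedDelay(() -> {
            try {
                nimbus.doCleanup();
            } catch (Exception e) {
                // Swallow and log so the recurring task survives a failed pass.
                e.printStackTrace();
            }
        }, 10, 10, TimeUnit.SECONDS);
    }
}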
Use of org.apache.storm.shade.com.google.common.annotations.VisibleForTesting in project storm by apache.
The class Supervisor, method checkAuthorization.
@VisibleForTesting
public void checkAuthorization(String topoName, Map<String, Object> topoConf, String operation, ReqContext context)
        throws AuthorizationException {
    if (context == null) {
        context = ReqContext.context();
    }
    Map<String, Object> checkConf = new HashMap<>();
    if (topoConf != null) {
        checkConf.putAll(topoConf);
    } else if (topoName != null) {
        checkConf.put(Config.TOPOLOGY_NAME, topoName);
    }
    if (context.isImpersonating()) {
        LOG.info("principal: {} is trying to impersonate principal: {}", context.realPrincipal(), context.principal());
        throw new WrappedAuthorizationException("Supervisor does not support impersonation");
    }
    IAuthorizer aclHandler = authorizationHandler;
    if (aclHandler != null) {
        if (!aclHandler.permit(context, operation, checkConf)) {
            ThriftAccessLogger.logAccess(context.requestID(), context.remoteAddress(), context.principal(),
                    operation, topoName, "access-denied");
            throw new WrappedAuthorizationException(
                    operation + (topoName != null ? " on topology " + topoName : "") + " is not authorized");
        } else {
            ThriftAccessLogger.logAccess(context.requestID(), context.remoteAddress(), context.principal(),
                    operation, topoName, "access-granted");
        }
    }
}
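The aclHandler consulted above can be any configured IAuthorizer. A minimal deny-by-default sketch showing the shape of the permit call; the class name and allowlist contents are made up for illustration:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.storm.security.auth.IAuthorizer;
import org.apache.storm.security.auth.ReqContext;

// Hypothetical authorizer: permits only operations on a fixed allowlist.
public class AllowlistAuthorizer implements IAuthorizer {
    private Set<String> allowed;

    @Override
    public void prepare(Map<String, Object> conf) {
        // Allowlist contents are illustrative, not a recommended policy.
        allowed = new HashSet<>(Arrays.asList("getTopology", "getSupervisorPageInfo"));
    }

    @Override
    public boolean permit(ReqContext context, String operation, Map<String, Object> topoConf) {
        return allowed.contains(operation);
    }
}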