use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
the class Node method assign.
/**
 * Assign a free slot on the node to the given topology and executors.
 * This will update the cluster too.
 * @param topId the topology to assign a free slot to.
 * @param executors the executors to run in that slot.
 * @param cluster the cluster to be updated
 */
public void assign(String topId, Collection<ExecutorDetails> executors, Cluster cluster) {
    if (!_isAlive) {
        throw new IllegalStateException("Trying to assign to a dead node " + _nodeId);
    }
    if (_freeSlots.isEmpty()) {
        throw new IllegalStateException("Trying to assign to a full node " + _nodeId);
    }
    if (executors.size() == 0) {
        LOG.warn("Trying to assign nothing from " + topId + " to " + _nodeId + " (Ignored)");
    } else {
        WorkerSlot slot = _freeSlots.iterator().next();
        cluster.assign(slot, topId, executors);
        assignInternal(slot, topId, false);
    }
}
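For context, a scheduler strategy could call assign roughly like the sketch below, handing each node with capacity one chunk of the topology's executors. This is an illustrative sketch only; spreadAcrossNodes and split are hypothetical helpers and are not part of the Node class shown above.

// Illustrative sketch: spread a topology's executors across nodes that still have free slots.
// `split` (divide a list into N chunks) and the surrounding method are assumptions.
void spreadAcrossNodes(String topId, List<ExecutorDetails> executors,
                       List<Node> nodesWithFreeSlots, Cluster cluster) {
    List<List<ExecutorDetails>> chunks = split(executors, nodesWithFreeSlots.size());
    for (int i = 0; i < chunks.size(); i++) {
        // assign() takes one free slot on the node and records the assignment in the cluster
        nodesWithFreeSlots.get(i).assign(topId, chunks.get(i), cluster);
    }
}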
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
the class RAS_Node method getExecutors.
public static Collection<ExecutorDetails> getExecutors(WorkerSlot ws, Cluster cluster) {
    Collection<ExecutorDetails> retList = new ArrayList<ExecutorDetails>();
    for (Entry<String, SchedulerAssignment> entry : cluster.getAssignments().entrySet()) {
        Map<ExecutorDetails, WorkerSlot> executorToSlot = entry.getValue().getExecutorToSlot();
        for (Map.Entry<ExecutorDetails, WorkerSlot> execToSlot : executorToSlot.entrySet()) {
            WorkerSlot slot = execToSlot.getValue();
            if (ws.getPort() == slot.getPort() && ws.getNodeId().equals(slot.getNodeId())) {
                ExecutorDetails exec = execToSlot.getKey();
                retList.add(exec);
            }
        }
    }
    return retList;
}
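A minimal usage sketch, assuming a Cluster and an SLF4J logger are in scope; the node id and port below are made-up values, not ones taken from the project:

// Illustrative only: count executors (from any topology) currently placed on a given slot.
WorkerSlot target = new WorkerSlot("supervisor-1", 6700); // hypothetical node id and port
int executorsOnSlot = RAS_Node.getExecutors(target, cluster).size();
LOG.info("Slot {}:{} currently hosts {} executors",
        target.getNodeId(), target.getPort(), executorsOnSlot);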
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
the class Nimbus method readAllSupervisorDetails.
/**
 * @param superToDeadPorts dead ports on each supervisor
 * @param topologies all of the topologies
 * @param missingAssignmentTopologies topologies that need assignments
 * @return a map from supervisor id to SupervisorDetails
 */
private Map<String, SupervisorDetails> readAllSupervisorDetails(Map<String, Set<Long>> superToDeadPorts, Topologies topologies, Collection<String> missingAssignmentTopologies) {
    Map<String, SupervisorDetails> ret = new HashMap<>();
    IStormClusterState state = stormClusterState;
    Map<String, SupervisorInfo> superInfos = state.allSupervisorInfo();
    List<SupervisorDetails> superDetails = new ArrayList<>();
    for (Entry<String, SupervisorInfo> entry : superInfos.entrySet()) {
        SupervisorInfo info = entry.getValue();
        superDetails.add(new SupervisorDetails(entry.getKey(), info.get_meta(), info.get_resources_map()));
    }
    // Note that allSlotsAvailableForScheduling only uses the supervisor details.
    // The rest of the arguments are there to satisfy the INimbus interface.
    Map<String, Set<Long>> superToPorts = new HashMap<>();
    for (WorkerSlot slot : inimbus.allSlotsAvailableForScheduling(superDetails, topologies, new HashSet<>(missingAssignmentTopologies))) {
        String superId = slot.getNodeId();
        Set<Long> ports = superToPorts.get(superId);
        if (ports == null) {
            ports = new HashSet<>();
            superToPorts.put(superId, ports);
        }
        ports.add((long) slot.getPort());
    }
    for (Entry<String, SupervisorInfo> entry : superInfos.entrySet()) {
        String superId = entry.getKey();
        SupervisorInfo info = entry.getValue();
        String hostname = info.get_hostname();
        // Hide the dead ports from the available ports;
        // these dead ports can be reused in the next round of assignments.
        Set<Long> deadPorts = superToDeadPorts.get(superId);
        Set<Long> allPorts = superToPorts.get(superId);
        if (allPorts == null) {
            allPorts = new HashSet<>();
        } else {
            allPorts = new HashSet<>(allPorts);
        }
        if (deadPorts != null) {
            allPorts.removeAll(deadPorts);
        }
        ret.put(superId, new SupervisorDetails(superId, hostname, info.get_scheduler_meta(), allPorts, info.get_resources_map()));
    }
    return ret;
}
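The null-check-then-put pattern used to build superToPorts can be written more compactly with Map.computeIfAbsent; a minimal sketch under the same types, where availableSlots stands in for the result of the INimbus call above:

// Equivalent grouping of available slots by supervisor id, using computeIfAbsent.
Map<String, Set<Long>> superToPorts = new HashMap<>();
for (WorkerSlot slot : availableSlots) {
    superToPorts.computeIfAbsent(slot.getNodeId(), k -> new HashSet<>())
            .add((long) slot.getPort());
}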
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
the class Nimbus method getWorkerResourcesForTopology.
private Map<WorkerSlot, WorkerResources> getWorkerResourcesForTopology(String topoId) {
    Map<WorkerSlot, WorkerResources> ret = idToWorkerResources.get().get(topoId);
    if (ret == null) {
        IStormClusterState state = stormClusterState;
        ret = new HashMap<>();
        Assignment assignment = state.assignmentInfo(topoId, null);
        if (assignment != null && assignment.is_set_worker_resources()) {
            for (Entry<NodeInfo, WorkerResources> entry : assignment.get_worker_resources().entrySet()) {
                NodeInfo ni = entry.getKey();
                WorkerSlot slot = new WorkerSlot(ni.get_node(), ni.get_port_iterator().next());
                ret.put(slot, entry.getValue());
            }
            idToWorkerResources.getAndUpdate(new Assoc<>(topoId, ret));
        }
    }
    return ret;
}
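The method acts as a read-through cache: results come from idToWorkerResources when present and are rebuilt from cluster state on a miss. A hedged usage sketch, assuming the thrift getter that mirrors the set_mem_on_heap call used later in this page:

// Illustrative only: total on-heap memory assigned to a topology's workers.
double totalOnHeapMb = 0;
for (WorkerResources wr : getWorkerResourcesForTopology(topoId).values()) {
    totalOnHeapMb += wr.get_mem_on_heap();
}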
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
the class Nimbus method mkAssignments.
private void mkAssignments(String scratchTopoId) throws Exception {
    if (!isLeader()) {
        LOG.info("not a leader, skipping assignments");
        return;
    }
    // get the existing assignments (just the topologyToExecutorToNodePort map) -> default to {}
    // filter out the ones which have an executor timeout
    // figure out the available slots on the cluster; add the used valid slots to get the total slots;
    //   figure out how many executors should be in each slot (e.g., 4, 4, 4, 5)
    // only keep existing slots that satisfy one of those counts; for the rest, reassign the executors across the remaining slots
    // edge case for slots with no executor timeout but with a supervisor timeout: just treat these as valid
    //   slots that can be reassigned to; worst case the executor will time out and won't be assigned here next time around
    IStormClusterState state = stormClusterState;
    // read all the topologies
    Map<String, StormBase> bases;
    Map<String, TopologyDetails> tds = new HashMap<>();
    synchronized (submitLock) {
        bases = state.topologyBases();
        for (Iterator<Entry<String, StormBase>> it = bases.entrySet().iterator(); it.hasNext(); ) {
            Entry<String, StormBase> entry = it.next();
            String id = entry.getKey();
            try {
                tds.put(id, readTopologyDetails(id, entry.getValue()));
            } catch (KeyNotFoundException e) {
                // a race happened and it is probably not running
                it.remove();
            }
        }
    }
    Topologies topologies = new Topologies(tds);
    List<String> assignedTopologyIds = state.assignments(null);
    Map<String, Assignment> existingAssignments = new HashMap<>();
    for (String id : assignedTopologyIds) {
        // the scratch topology's assignment is skipped, so its slots will be treated as free by the scheduler
        if (!id.equals(scratchTopoId)) {
            existingAssignments.put(id, state.assignmentInfo(id, null));
        }
    }
    // make the new assignments for topologies
    Map<String, SchedulerAssignment> newSchedulerAssignments = null;
    synchronized (schedLock) {
        newSchedulerAssignments = computeNewSchedulerAssignments(existingAssignments, topologies, bases, scratchTopoId);
        Map<String, Map<List<Long>, List<Object>>> topologyToExecutorToNodePort = computeNewTopoToExecToNodePort(newSchedulerAssignments, existingAssignments);
        for (String id : assignedTopologyIds) {
            if (!topologyToExecutorToNodePort.containsKey(id)) {
                topologyToExecutorToNodePort.put(id, null);
            }
        }
        Map<String, Map<List<Object>, List<Double>>> newAssignedWorkerToResources = computeTopoToNodePortToResources(newSchedulerAssignments);
        int nowSecs = Time.currentTimeSecs();
        Map<String, SupervisorDetails> basicSupervisorDetailsMap = basicSupervisorDetailsMap(state);
        // construct the final Assignments by adding start times etc. into them
        Map<String, Assignment> newAssignments = new HashMap<>();
        for (Entry<String, Map<List<Long>, List<Object>>> entry : topologyToExecutorToNodePort.entrySet()) {
            String topoId = entry.getKey();
            Map<List<Long>, List<Object>> execToNodePort = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            Set<String> allNodes = new HashSet<>();
            if (execToNodePort != null) {
                for (List<Object> nodePort : execToNodePort.values()) {
                    allNodes.add((String) nodePort.get(0));
                }
            }
            Map<String, String> allNodeHost = new HashMap<>();
            if (existingAssignment != null) {
                allNodeHost.putAll(existingAssignment.get_node_host());
            }
            for (String node : allNodes) {
                String host = inimbus.getHostName(basicSupervisorDetailsMap, node);
                if (host != null) {
                    allNodeHost.put(node, host);
                }
            }
            Map<List<Long>, NodeInfo> execNodeInfo = null;
            if (existingAssignment != null) {
                execNodeInfo = existingAssignment.get_executor_node_port();
            }
            List<List<Long>> reassignExecutors = changedExecutors(execNodeInfo, execToNodePort);
            Map<List<Long>, Long> startTimes = new HashMap<>();
            if (existingAssignment != null) {
                startTimes.putAll(existingAssignment.get_executor_start_time_secs());
            }
            for (List<Long> id : reassignExecutors) {
                startTimes.put(id, (long) nowSecs);
            }
            Map<List<Object>, List<Double>> workerToResources = newAssignedWorkerToResources.get(topoId);
            Assignment newAssignment = new Assignment((String) conf.get(Config.STORM_LOCAL_DIR));
            Map<String, String> justAssignedKeys = new HashMap<>(allNodeHost);
            // modifies justAssignedKeys
            justAssignedKeys.keySet().retainAll(allNodes);
            newAssignment.set_node_host(justAssignedKeys);
            // convert NodePort to NodeInfo (again!!!)
            Map<List<Long>, NodeInfo> execToNodeInfo = new HashMap<>();
            for (Entry<List<Long>, List<Object>> execAndNodePort : execToNodePort.entrySet()) {
                List<Object> nodePort = execAndNodePort.getValue();
                NodeInfo ni = new NodeInfo();
                ni.set_node((String) nodePort.get(0));
                ni.add_to_port((Long) nodePort.get(1));
                execToNodeInfo.put(execAndNodePort.getKey(), ni);
            }
            newAssignment.set_executor_node_port(execToNodeInfo);
            newAssignment.set_executor_start_time_secs(startTimes);
            // do another conversion (let's just make this all common)
            Map<NodeInfo, WorkerResources> workerResources = new HashMap<>();
            for (Entry<List<Object>, List<Double>> wr : workerToResources.entrySet()) {
                List<Object> nodePort = wr.getKey();
                NodeInfo ni = new NodeInfo();
                ni.set_node((String) nodePort.get(0));
                ni.add_to_port((Long) nodePort.get(1));
                List<Double> r = wr.getValue();
                WorkerResources resources = new WorkerResources();
                resources.set_mem_on_heap(r.get(0));
                resources.set_mem_off_heap(r.get(1));
                resources.set_cpu(r.get(2));
                workerResources.put(ni, resources);
            }
            newAssignment.set_worker_resources(workerResources);
            newAssignments.put(topoId, newAssignment);
        }
        if (!newAssignments.equals(existingAssignments)) {
            LOG.debug("RESETTING id->resources and id->worker-resources cache!");
            idToResources.set(new HashMap<>());
            idToWorkerResources.set(new HashMap<>());
        }
        // only log/set when there has been a change to the assignment
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            // NOT used: TopologyDetails topologyDetails = topologies.getById(topoId);
            if (assignment.equals(existingAssignment)) {
                LOG.debug("Assignment for {} hasn't changed", topoId);
            } else {
                LOG.info("Setting new assignment for topology id {}: {}", topoId, assignment);
                state.setAssignment(topoId, assignment);
            }
        }
        Map<String, Collection<WorkerSlot>> addedSlots = new HashMap<>();
        for (Entry<String, Assignment> entry : newAssignments.entrySet()) {
            String topoId = entry.getKey();
            Assignment assignment = entry.getValue();
            Assignment existingAssignment = existingAssignments.get(topoId);
            if (existingAssignment == null) {
                existingAssignment = new Assignment();
                existingAssignment.set_executor_node_port(new HashMap<>());
                existingAssignment.set_executor_start_time_secs(new HashMap<>());
            }
            Set<WorkerSlot> newSlots = newlyAddedSlots(existingAssignment, assignment);
            addedSlots.put(topoId, newSlots);
        }
        inimbus.assignSlots(topologies, addedSlots);
    }
}
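The newlyAddedSlots helper is called above but not shown on this page. A hedged sketch of the semantics implied by the surrounding code, as an illustration rather than the project's actual implementation (newlyAddedSlotsSketch and toSlots are names introduced here):

// Sketch: the worker slots present in the new assignment's executor->node+port map
// but absent from the existing one. Uses only getters that appear elsewhere on this page.
private static Set<WorkerSlot> newlyAddedSlotsSketch(Assignment existing, Assignment next) {
    Set<WorkerSlot> newSlots = toSlots(next);
    newSlots.removeAll(toSlots(existing));
    return newSlots;
}

private static Set<WorkerSlot> toSlots(Assignment assignment) {
    Set<WorkerSlot> slots = new HashSet<>();
    for (NodeInfo ni : assignment.get_executor_node_port().values()) {
        // each NodeInfo carries a node id plus (here) a single port
        slots.add(new WorkerSlot(ni.get_node(), ni.get_port_iterator().next()));
    }
    return slots;
}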