Use of backtype.storm.scheduler.WorkerSlot in project jstorm by alibaba.
The class BaseExecutors, method getConnection:
    protected IConnection getConnection(int taskId) {
        IConnection conn = null;
        // Look up the worker slot (node + port) that hosts the target task.
        WorkerSlot nodePort = task.getTaskNodeport().get(taskId);
        if (nodePort != null) {
            // Resolve the slot to its open connection, if one has been established.
            conn = task.getNodeportSocket().get(nodePort);
        }
        return conn;
    }
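Note that getConnection can return null when the task's slot is not yet assigned or its connection is not yet open, so callers need to guard against that. A minimal hypothetical caller is sketched below; the transfer method name is an assumption, and it presumes jstorm's IConnection exposes a send(TaskMessage) method and the backtype.storm.messaging.TaskMessage(int, byte[]) constructor:

    // Hypothetical caller, not taken from jstorm: shows the null guard a
    // consumer of getConnection would typically need.
    void transfer(int targetTaskId, byte[] payload) {
        IConnection conn = getConnection(targetTaskId);
        if (conn == null) {
            // The target task's slot may not be assigned yet; drop or retry per policy.
            LOG.warn("No connection for task {}", targetTaskId);
            return;
        }
        conn.send(new TaskMessage(targetTaskId, payload));
    }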
Use of backtype.storm.scheduler.WorkerSlot in project jstorm by alibaba.
The class MkShuffer, method refreshTasks:
    public void refreshTasks() {
        isInterShuffle = ConfigExtension.getShuffleEnableInterPath(workerData.getStormConf());
        loadMark = (float) ConfigExtension.getShuffleInterLoadMark(workerData.getStormConf());
        Set<Integer> localNodeTasks = workerData.getLocalNodeTasks();
        // Nothing to refresh if the set of tasks on this node has not changed.
        if (oldLocalNodeTasks != null && oldLocalNodeTasks.equals(localNodeTasks)) {
            return;
        } else {
            oldLocalNodeTasks = new HashSet<Integer>(localNodeTasks);
        }
        taskNodeport = workerData.getTaskNodeport();
        nodeportSocket = workerData.getNodeportSocket();
        Set<Integer> localWorkerTaskSet = workerData.getTaskids();
        Map<String, List<Integer>> componentTasks = workerData.getComponentToSortedTasks();
        Set<Integer> sourceTasks = JStormUtils.listToSet(componentTasks.get(sourceComponent));
        Set<Integer> targetTasks = JStormUtils.listToSet(componentTasks.get(targetComponent));
        // Partition the target tasks into same-worker, same-node, and remote-node groups.
        ArrayList<Integer> localWorkerTasksTmp = new ArrayList<Integer>();
        ArrayList<Integer> localNodeTasksTmp = new ArrayList<Integer>();
        ArrayList<Integer> otherNodeTasksTmp = new ArrayList<Integer>();
        for (Integer task : targetTasks) {
            if (localWorkerTaskSet.contains(task)) {
                localWorkerTasksTmp.add(task);
            } else if (localNodeTasks.contains(task)) {
                localNodeTasksTmp.add(task);
            } else {
                otherNodeTasksTmp.add(task);
            }
        }
        if (this.localWorkerTasks == null) {
            this.localWorkerTasks = localWorkerTasksTmp;
            localWorkerTaskSize = this.localWorkerTasks.size();
        }
        if (!isInterShuffle) {
            // Inter-path shuffle is disabled: treat every target task as out-of-worker.
            localWorkerTaskSize = 0;
            outWorkerTasks = JStormUtils.mk_list(targetTasks);
            outWorkerTaskSize = outWorkerTasks.size();
            return;
        }
        // The remaining logic runs only when isInterShuffle is true.
        Set<String> sourceHosts = new HashSet<String>();
        Set<String> targetHosts = new HashSet<String>();
        for (Entry<Integer, WorkerSlot> entry : taskNodeport.entrySet()) {
            Integer task = entry.getKey();
            WorkerSlot workerSlot = entry.getValue();
            String host = workerSlot.getNodeId();
            if (sourceTasks.contains(task)) {
                sourceHosts.add(host);
            } else if (targetTasks.contains(task)) {
                targetHosts.add(host);
            }
        }
        LOG.info("{} hosts {} tasks {}, {} hosts {} tasks {}", sourceComponent, sourceHosts, sourceTasks, targetComponent, targetHosts, targetTasks);
        double localNodePriority = 2.0;
        if (targetHosts.equals(sourceHosts) && targetHosts.size() > 0) {
            // Every target node also hosts a source task, so double the priority.
            localNodePriority *= 2;
        }
        if (localWorkerTasksTmp.size() != 0) {
            // The current worker will consume much of the CPU, so reduce the priority.
            localNodePriority /= 2;
        }
        // Weight same-node tasks: they end up in the list roughly localNodePriority times.
        ArrayList<Integer> outWorkerTasksTmp = new ArrayList<Integer>();
        outWorkerTasksTmp.addAll(localNodeTasksTmp);
        outWorkerTasksTmp.addAll(otherNodeTasksTmp);
        for (int i = 1; i < localNodePriority; i++) {
            outWorkerTasksTmp.addAll(localNodeTasksTmp);
        }
        this.outWorkerTasks = outWorkerTasksTmp;
        outWorkerTaskSize = outWorkerTasks.size();
        LOG.info("Source:{}, target:{}, localTasks:{}, outTasks:{}", sourceComponent, targetComponent, localWorkerTasks, outWorkerTasks);
    }
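The duplicate entries in outWorkerTasks are the whole point of the priority computation: a chooser that picks uniformly at random from the list implicitly favors same-node tasks in proportion to localNodePriority. The sketch below illustrates this; it is a hypothetical chooser using java.util.Random, not MkShuffer's actual selection code:

    // Hypothetical chooser, not from jstorm: a uniform pick over the weighted
    // list is an implicitly weighted choice toward same-node tasks.
    private final java.util.Random random = new java.util.Random();

    int chooseOutWorkerTask() {
        // Same-node tasks appear ~localNodePriority times in outWorkerTasks,
        // so a uniform index lands on them proportionally more often.
        return outWorkerTasks.get(random.nextInt(outWorkerTaskSize));
    }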
Use of backtype.storm.scheduler.WorkerSlot in project storm-mesos by nathanmarz.
The class MesosNimbus, method assignSlots:
    @Override
    public void assignSlots(Topologies topologies, Map<String, Collection<WorkerSlot>> slots) {
        synchronized (OFFERS_LOCK) {
            Map<OfferID, List<TaskInfo>> toLaunch = new HashMap<OfferID, List<TaskInfo>>();
            for (String topologyId : slots.keySet()) {
                for (WorkerSlot slot : slots.get(topologyId)) {
                    OfferID id = findOffer(slot);
                    if (id != null) {
                        // Only look up the offer once we know a matching one exists.
                        Offer offer = _offers.get(id);
                        if (!toLaunch.containsKey(id)) {
                            toLaunch.put(id, new ArrayList<TaskInfo>());
                        }
                        TopologyDetails details = topologies.getById(topologyId);
                        int cpu = MesosCommon.topologyCpu(_conf, details);
                        int mem = MesosCommon.topologyMem(_conf, details);
                        Map<String, String> executorData = new HashMap<String, String>();
                        executorData.put(MesosCommon.SUPERVISOR_ID, slot.getNodeId() + "-" + details.getId());
                        executorData.put(MesosCommon.ASSIGNMENT_ID, slot.getNodeId());
                        String executorDataStr = JSONValue.toJSONString(executorData);
                        LOG.info("Launching task with executor data: <" + executorDataStr + ">");
                        TaskInfo task = TaskInfo.newBuilder()
                            .setName("worker " + slot.getNodeId() + ":" + slot.getPort())
                            .setTaskId(TaskID.newBuilder()
                                .setValue(MesosCommon.taskId(slot.getNodeId(), slot.getPort())))
                            .setSlaveId(offer.getSlaveId())
                            // The executor fetches the storm-mesos tarball and starts a supervisor.
                            .setExecutor(ExecutorInfo.newBuilder()
                                .setExecutorId(ExecutorID.newBuilder().setValue(details.getId()))
                                .setData(ByteString.copyFromUtf8(executorDataStr))
                                .setCommand(CommandInfo.newBuilder()
                                    .addUris(URI.newBuilder().setValue((String) _conf.get(CONF_EXECUTOR_URI)))
                                    .setValue("cd storm-mesos* && python bin/storm-mesos supervisor")))
                            // Reserve the topology's cpu and mem plus the slot's single port.
                            .addResources(Resource.newBuilder()
                                .setName("cpus")
                                .setType(Type.SCALAR)
                                .setScalar(Scalar.newBuilder().setValue(cpu)))
                            .addResources(Resource.newBuilder()
                                .setName("mem")
                                .setType(Type.SCALAR)
                                .setScalar(Scalar.newBuilder().setValue(mem)))
                            .addResources(Resource.newBuilder()
                                .setName("ports")
                                .setType(Type.RANGES)
                                .setRanges(Ranges.newBuilder()
                                    .addRange(Range.newBuilder()
                                        .setBegin(slot.getPort())
                                        .setEnd(slot.getPort()))))
                            .build();
                        toLaunch.get(id).add(task);
                    }
                }
            }
            for (OfferID id : toLaunch.keySet()) {
                List<TaskInfo> tasks = toLaunch.get(id);
                LOG.info("Launching tasks for offer " + id.getValue() + "\n" + tasks.toString());
                _driver.launchTasks(id, tasks);
                // An offer can back at most one launch, so consume it.
                _offers.remove(id);
            }
        }
    }
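The excerpt calls findOffer but does not show it. A minimal sketch of what such slot-to-offer matching could look like is given below; the body is an assumption (matching only on the slot's node id against the offer hostname), whereas the real storm-mesos implementation may also verify that the offer has the required port and resources:

    // Hypothetical sketch of findOffer, not the project's actual implementation:
    // match a WorkerSlot to a pending offer by hostname only.
    private OfferID findOffer(WorkerSlot slot) {
        for (Map.Entry<OfferID, Offer> entry : _offers.entrySet()) {
            if (entry.getValue().getHostname().equals(slot.getNodeId())) {
                return entry.getKey();
            }
        }
        return null;
    }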