Use of com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot in project jstorm by alibaba.
In class GrayUpgradeHandler, method init:
@Override
public void init(TopologyMasterContext tmContext) {
    this.tmContext = tmContext;
    this.stormClusterState = tmContext.getZkCluster();
    this.topologyId = tmContext.getTopologyId();
    this.hostPortToTasks = new HashMap<>();
    this.taskToHostPort = new HashMap<>();
    // index the current worker set both ways: "host:port" -> task ids and task id -> "host:port"
    for (ResourceWorkerSlot workerSlot : tmContext.getWorkerSet().get()) {
        Set<Integer> tasks = workerSlot.getTasks();
        String hostPort = workerSlot.getHostPort();
        hostPortToTasks.put(hostPort, Sets.newHashSet(tasks));
        for (Integer task : tasks) {
            this.taskToHostPort.put(task, hostPort);
        }
    }
    this.totalWorkers = new HashSet<>();
}
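The snippet above is essentially a bidirectional index between workers and their tasks. As a minimal standalone sketch of the same pattern, assuming only the getHostPort() and getTasks() accessors that appear in the snippet (the WorkerTaskIndex class and its method name are illustrative, not part of jstorm):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;

public class WorkerTaskIndex {
    // Builds a task-id -> "host:port" lookup, mirroring the taskToHostPort map populated in init().
    public static Map<Integer, String> taskToHostPort(Set<ResourceWorkerSlot> workers) {
        Map<Integer, String> index = new HashMap<>();
        for (ResourceWorkerSlot worker : workers) {
            String hostPort = worker.getHostPort();   // worker address, e.g. "10.1.1.1:6800"
            for (Integer task : worker.getTasks()) {  // task ids assigned to this worker
                index.put(task, hostPort);
            }
        }
        return index;
    }
}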
Use of com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot in project jstorm by alibaba.
In class HBaseMetricSendClient, method updateTasks:
private void updateTasks(TaskDeadEvent event) {
    Map<Integer, ResourceWorkerSlot> deadTasks = event.getDeadTasks();
    List<KVSerializable> taskTrackList = new ArrayList<>(deadTasks.size());
    // close the track record of every dead task with the event timestamp
    for (Map.Entry<Integer, ResourceWorkerSlot> task : deadTasks.entrySet()) {
        TaskTrack taskTrack = new TaskTrack(event.getClusterMetricsContext().getClusterName(), event.getTopologyId());
        taskTrack.setEnd(new Date(event.getTimestamp()));
        taskTrack.setTaskId(task.getKey());
        taskTrack.setHost(task.getValue().getHostname());
        taskTrack.setPort(task.getValue().getPort());
        taskTrackList.add(taskTrack);
    }
    batchAdd(taskTrackList, TABLE_TASK_TRACK);
}
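For reference, the host/port accessors used above can be applied the same way to turn a dead-task map into log-friendly strings. A minimal sketch, assuming only the getHostname() and getPort() accessors shown in the snippet (the DeadTaskFormatter helper is hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;

public class DeadTaskFormatter {
    // Produces entries such as "task 7 dead on 10.1.1.1:6800" from a task-id -> worker map.
    public static List<String> describe(Map<Integer, ResourceWorkerSlot> deadTasks) {
        List<String> lines = new ArrayList<>(deadTasks.size());
        for (Map.Entry<Integer, ResourceWorkerSlot> entry : deadTasks.entrySet()) {
            ResourceWorkerSlot worker = entry.getValue();
            lines.add("task " + entry.getKey() + " dead on " + worker.getHostname() + ":" + worker.getPort());
        }
        return lines;
    }
}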
Use of com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot in project jstorm by alibaba.
In class HBaseMetricSendClient, method insertOrUpdateTasks:
/**
 * Insert tasks on a new assignment; update tasks on rebalance.
 */
private void insertOrUpdateTasks(TaskStartEvent event) {
    Assignment old = event.getOldAssignment();
    Assignment current = event.getNewAssignment();
    Map<Integer, String> task2Component = event.getTask2Component();
    List<KVSerializable> taskTrackList = new ArrayList<>();
    // assign
    if (old == null) {
        Set<ResourceWorkerSlot> workers = current.getWorkers();
        logger.info("old workers are null, assigned workers:{}", Joiner.on(",").join(workers));
        for (ResourceWorkerSlot worker : workers) {
            Set<Integer> tasks = worker.getTasks();
            for (Integer task : tasks) {
                TaskTrack track = new TaskTrack(event.getClusterMetricsContext().getClusterName(), event.getTopologyId());
                track.setStart(new Date(event.getTimestamp()));
                track.setComponent(task2Component.get(task));
                track.setHost(worker.getHostname());
                track.setPort(worker.getPort());
                track.setTaskId(task);
                taskTrackList.add(track);
            }
        }
    } else {
        // rebalance, we only insert newly assigned tasks
        Set<ResourceWorkerSlot> oldWorkers = old.getWorkers();
        Joiner joiner = Joiner.on(",");
        logger.info("old workers:{}, new workers:{}", joiner.join(oldWorkers), joiner.join(current.getWorkers()));
        for (ResourceWorkerSlot worker : current.getWorkers()) {
            // a new worker, insert all tasks
            if (!oldWorkers.contains(worker)) {
                for (Integer task : worker.getTasks()) {
                    TaskTrack track = new TaskTrack(event.getClusterMetricsContext().getClusterName(), event.getTopologyId());
                    track.setStart(new Date(event.getTimestamp()));
                    track.setComponent(task2Component.get(task));
                    track.setHost(worker.getHostname());
                    track.setPort(worker.getPort());
                    track.setTaskId(task);
                    taskTrackList.add(track);
                }
            } else {
                for (Integer task : worker.getTasks()) {
                    ResourceWorkerSlot oldWorker = old.getWorkerByTaskId(task);
                    if (oldWorker != null) {
                        // update end time of old task
                        TaskTrack oldTrack = new TaskTrack(event.getClusterMetricsContext().getClusterName(), event.getTopologyId());
                        oldTrack.setEnd(new Date(event.getTimestamp()));
                        oldTrack.setTaskId(task);
                        oldTrack.setHost(oldWorker.getHostname());
                        oldTrack.setPort(oldWorker.getPort());
                        taskTrackList.add(oldTrack);
                        // insert new task
                        TaskTrack track = new TaskTrack(event.getClusterMetricsContext().getClusterName(), event.getTopologyId());
                        track.setStart(new Date());
                        track.setComponent(task2Component.get(task));
                        track.setHost(worker.getHostname());
                        track.setPort(worker.getPort());
                        track.setTaskId(task);
                        taskTrackList.add(track);
                    }
                }
            }
        }
    }
    if (taskTrackList.size() > 0) {
        batchAdd(taskTrackList, TABLE_TASK_TRACK);
    }
}
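The rebalance branch above hinges on oldWorkers.contains(worker), i.e. on ResourceWorkerSlot value equality between the old and new assignments. A minimal sketch of that check in isolation, under the same assumption (the AssignmentDiff helper is illustrative, not part of jstorm):

import java.util.HashSet;
import java.util.Set;

import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;

public class AssignmentDiff {
    // Returns the worker slots that appear in the new assignment but not in the old one;
    // tasks on these workers are the ones inserted wholesale in insertOrUpdateTasks().
    public static Set<ResourceWorkerSlot> newlyAddedWorkers(Set<ResourceWorkerSlot> oldWorkers,
                                                            Set<ResourceWorkerSlot> newWorkers) {
        Set<ResourceWorkerSlot> added = new HashSet<>();
        for (ResourceWorkerSlot worker : newWorkers) {
            if (!oldWorkers.contains(worker)) {  // relies on ResourceWorkerSlot equals()/hashCode()
                added.add(worker);
            }
        }
        return added;
    }
}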
Use of com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot in project jstorm by alibaba.
In class MonitorRunnable, method run:
/**
 * TODO: when a topology is being reassigned, the check should be skipped for that topology
 */
@Override
public void run() {
    StormClusterState clusterState = data.getStormClusterState();
    try {
        // Note: we need to check assignments first
        List<String> activeTopologies = clusterState.assignments(null);
        if (activeTopologies == null) {
            LOG.info("Failed to get active topologies");
            return;
        }
        for (String topologyId : activeTopologies) {
            if (clusterState.storm_base(topologyId, null) == null) {
                continue;
            }
            LOG.debug("Check tasks of topology " + topologyId);
            // Note that we don't check /ZK-dir/taskbeats/topologyId to get task ids
            Set<Integer> taskIds = clusterState.task_ids(topologyId);
            if (taskIds == null) {
                LOG.info("Failed to get task ids of " + topologyId);
                continue;
            }
            Assignment assignment = clusterState.assignment_info(topologyId, null);
            Set<Integer> deadTasks = new HashSet<>();
            boolean needReassign = false;
            for (Integer task : taskIds) {
                boolean isTaskDead = NimbusUtils.isTaskDead(data, topologyId, task);
                if (isTaskDead) {
                    deadTasks.add(task);
                    needReassign = true;
                }
            }
            TopologyTaskHbInfo topologyHbInfo = data.getTasksHeartbeat().get(topologyId);
            if (needReassign) {
                if (topologyHbInfo != null) {
                    int topologyMasterId = topologyHbInfo.get_topologyMasterId();
                    if (deadTasks.contains(topologyMasterId)) {
                        deadTasks.clear();
                        if (assignment != null) {
                            ResourceWorkerSlot resource = assignment.getWorkerByTaskId(topologyMasterId);
                            if (resource != null)
                                deadTasks.addAll(resource.getTasks());
                            else
                                deadTasks.add(topologyMasterId);
                        }
                    } else {
                        Map<Integer, TaskHeartbeat> taskHbs = topologyHbInfo.get_taskHbs();
                        int launchTime = JStormUtils.parseInt(data.getConf().get(Config.NIMBUS_TASK_LAUNCH_SECS));
                        if (taskHbs == null || taskHbs.get(topologyMasterId) == null || taskHbs.get(topologyMasterId).get_uptime() < launchTime) {
                            /*try {
                                clusterState.topology_heartbeat(topologyId, topologyHbInfo);
                            } catch (Exception e) {
                                LOG.error("Failed to update task heartbeat info to ZK for " + topologyId, e);
                            }*/
                            return;
                        }
                    }
                    Map<Integer, ResourceWorkerSlot> deadTaskWorkers = new HashMap<>();
                    for (Integer task : deadTasks) {
                        LOG.info("Found " + topologyId + ", taskId:" + task + " is dead");
                        ResourceWorkerSlot resource = null;
                        if (assignment != null)
                            resource = assignment.getWorkerByTaskId(task);
                        if (resource != null) {
                            deadTaskWorkers.put(task, resource);
                        }
                    }
                    Map<ResourceWorkerSlot, List<Integer>> workersDeadTasks = JStormUtils.reverse_map(deadTaskWorkers);
                    for (Map.Entry<ResourceWorkerSlot, List<Integer>> entry : workersDeadTasks.entrySet()) {
                        ResourceWorkerSlot resource = entry.getKey();
                        // we only report one task per worker
                        for (Integer task : entry.getValue()) {
                            Date now = new Date();
                            String nowStr = TimeFormat.getSecond(now);
                            String errorInfo = "Task-" + entry.getValue().toString() + " is dead on " + resource.getHostname() + ":" + resource.getPort() + ", " + nowStr;
                            LOG.info(errorInfo);
                            clusterState.report_task_error(topologyId, task, errorInfo, ErrorConstants.ERROR, ErrorConstants.CODE_TASK_DEAD, ErrorConstants.DURATION_SECS_TASK_DEAD);
                            break;
                        }
                    }
                    if (deadTaskWorkers.size() > 0) {
                        // notify jstorm monitor
                        TaskDeadEvent.pushEvent(topologyId, deadTaskWorkers);
                    }
                }
                NimbusUtils.transition(data, topologyId, false, StatusType.monitor);
            }
            if (topologyHbInfo != null) {
                try {
                    clusterState.topology_heartbeat(topologyId, topologyHbInfo);
                } catch (Exception e) {
                    LOG.error("Failed to update task heartbeat info to ZK for " + topologyId, e);
                }
            }
        }
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }
}
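In the run() method, JStormUtils.reverse_map turns the task-id -> worker map into a worker -> task-ids map so that only one error is reported per worker. A hand-rolled sketch of that grouping step, assuming ResourceWorkerSlot can be used as a map key (which the reverse_map result above already implies); the DeadTaskGrouper helper is illustrative:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;

public class DeadTaskGrouper {
    // Groups dead task ids by the worker slot they were running on.
    public static Map<ResourceWorkerSlot, List<Integer>> groupByWorker(Map<Integer, ResourceWorkerSlot> deadTaskWorkers) {
        Map<ResourceWorkerSlot, List<Integer>> byWorker = new HashMap<>();
        for (Map.Entry<Integer, ResourceWorkerSlot> entry : deadTaskWorkers.entrySet()) {
            List<Integer> tasks = byWorker.get(entry.getValue());
            if (tasks == null) {
                tasks = new ArrayList<>();
                byWorker.put(entry.getValue(), tasks);
            }
            tasks.add(entry.getKey());
        }
        return byWorker;
    }
}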
Use of com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot in project jstorm by alibaba.
In class TopologyAssign, method getNewOrChangedTaskIds:
/**
 * get all task ids which are newly assigned or reassigned
 */
public static Set<Integer> getNewOrChangedTaskIds(Set<ResourceWorkerSlot> oldWorkers, Set<ResourceWorkerSlot> workers) {
    Set<Integer> rtn = new HashSet<>();
    HashMap<String, ResourceWorkerSlot> workerPortMap = HostPortToWorkerMap(oldWorkers);
    for (ResourceWorkerSlot worker : workers) {
        ResourceWorkerSlot oldWorker = workerPortMap.get(worker.getHostPort());
        if (oldWorker != null) {
            Set<Integer> oldTasks = oldWorker.getTasks();
            for (Integer task : worker.getTasks()) {
                if (!(oldTasks.contains(task)))
                    rtn.add(task);
            }
        } else {
            if (worker.getTasks() != null) {
                rtn.addAll(worker.getTasks());
            }
        }
    }
    return rtn;
}
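HostPortToWorkerMap(oldWorkers) is not shown in this listing; from its use here it presumably indexes the old workers by their "host:port" string. A minimal sketch under that assumption, using only the getHostPort() accessor seen above (the class name and the exact signature in TopologyAssign may differ):

import java.util.HashMap;
import java.util.Set;

import com.alibaba.jstorm.schedule.default_assign.ResourceWorkerSlot;

public class WorkerIndexSketch {
    // Indexes worker slots by "host:port" so the old worker occupying the same slot can be looked up.
    public static HashMap<String, ResourceWorkerSlot> hostPortToWorkerMap(Set<ResourceWorkerSlot> workers) {
        HashMap<String, ResourceWorkerSlot> map = new HashMap<>();
        for (ResourceWorkerSlot worker : workers) {
            map.put(worker.getHostPort(), worker);
        }
        return map;
    }
}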