Use of backtype.storm.generated.StormTopology in project jstorm by alibaba.
The class ServiceHandler, method getTopology:
/**
 * Get the StormTopology of a topology by deserializing its locally stored
 * topology code (read through the blobstore).
 *
 * @param id String: topology id
 * @return StormTopology
 */
@Override
public StormTopology getTopology(String id) throws NotAliveException, TException {
    StormTopology topology;
    try {
        StormTopology stormtopology = StormConfig.read_nimbus_topology_code(id, data.getBlobStore());
        if (stormtopology == null) {
            throw new NotAliveException("No topology of " + id);
        }
        Map<Object, Object> topologyConf =
                (Map<Object, Object>) StormConfig.read_nimbus_topology_conf(id, data.getBlobStore());
        topology = Common.system_topology(topologyConf, stormtopology);
    } catch (Exception e) {
        LOG.error("Failed to get topology " + id + ",", e);
        throw new TException("Failed to get system_topology");
    }
    return topology;
}
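This handler sits behind the Nimbus Thrift API, so the usual way to reach it is through a Thrift client. A minimal caller sketch, assuming a reachable Nimbus and a placeholder topology id (the id below is made up; real ids look like "<name>-<counter>-<timestamp>"):

import java.util.Map;

import backtype.storm.generated.StormTopology;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;

public class GetTopologyClient {
    public static void main(String[] args) throws Exception {
        Map conf = Utils.readStormConfig(); // loads defaults plus the local storm.yaml
        NimbusClient client = NimbusClient.getConfiguredClient(conf);
        try {
            // placeholder id; getTopology throws NotAliveException if no
            // topology with this id is running
            StormTopology topology = client.getClient().getTopology("wordcount-1-1400000000");
            System.out.println("spouts: " + topology.get_spouts().keySet());
            System.out.println("bolts:  " + topology.get_bolts().keySet());
        } finally {
            client.close();
        }
    }
}

Note that, as the method body shows, the returned topology is the system topology (built by Common.system_topology), so it includes system components, not only the user-declared ones.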
Use of backtype.storm.generated.StormTopology in project jstorm by alibaba.
The class TopologyAssign, method prepareTopologyAssign:
protected TopologyAssignContext prepareTopologyAssign(TopologyAssignEvent event) throws Exception {
    TopologyAssignContext ret = new TopologyAssignContext();
    String topologyId = event.getTopologyId();
    ret.setTopologyId(topologyId);

    int topoMasterId = nimbusData.getTasksHeartbeat().get(topologyId).get_topologyMasterId();
    ret.setTopologyMasterTaskId(topoMasterId);
    LOG.info("prepareTopologyAssign, topoMasterId={}", topoMasterId);

    Map<Object, Object> nimbusConf = nimbusData.getConf();
    Map<Object, Object> topologyConf = StormConfig.read_nimbus_topology_conf(topologyId, nimbusData.getBlobStore());
    StormTopology rawTopology = StormConfig.read_nimbus_topology_code(topologyId, nimbusData.getBlobStore());
    ret.setRawTopology(rawTopology);

    Map stormConf = new HashMap();
    stormConf.putAll(nimbusConf);
    stormConf.putAll(topologyConf);
    ret.setStormConf(stormConf);

    StormClusterState stormClusterState = nimbusData.getStormClusterState();

    // get all running supervisors; no callback is needed to watch them
    Map<String, SupervisorInfo> supInfos = Cluster.get_all_SupervisorInfo(stormClusterState, null);
    // initialize the available worker ports of every supervisor
    for (Entry<String, SupervisorInfo> supInfo : supInfos.entrySet()) {
        SupervisorInfo supervisor = supInfo.getValue();
        if (supervisor != null)
            supervisor.setAvailableWorkerPorts(supervisor.getWorkerPorts());
    }
    getAliveSupervsByHb(supInfos, nimbusConf);
    if (supInfos.size() == 0) {
        throw new FailedAssignTopologyException("Failed to make assignment " + topologyId + ", due to no alive supervisor");
    }

    Map<Integer, String> taskToComponent = Cluster.get_all_task_component(stormClusterState, topologyId, null);
    ret.setTaskToComponent(taskToComponent);

    // get the task ids stored under /ZK/tasks/{topologyId}
    Set<Integer> allTaskIds = taskToComponent.keySet();
    if (allTaskIds == null || allTaskIds.size() == 0) {
        String errMsg = "Failed to get all task ID list from /ZK-dir/tasks/" + topologyId;
        LOG.warn(errMsg);
        throw new IOException(errMsg);
    }
    ret.setAllTaskIds(allTaskIds);

    Set<Integer> aliveTasks = new HashSet<Integer>();
    // unstoppedTasks are tasks that are still alive but whose supervisor's machine is dead
    Set<Integer> unstoppedTasks = new HashSet<Integer>();
    Set<Integer> deadTasks = new HashSet<Integer>();
    Set<ResourceWorkerSlot> unstoppedWorkers = new HashSet<ResourceWorkerSlot>();

    Assignment existingAssignment = stormClusterState.assignment_info(topologyId, null);
    if (existingAssignment != null) {
        aliveTasks = getAliveTasks(topologyId, allTaskIds);
        /*
         * Check first whether the topology master task is alive, since all task
         * heartbeat info is reported through the topology master. If the master
         * is dead, reassign the master's worker first: mark only that worker's
         * tasks dead and treat every other task as alive for this round.
         */
        if (!aliveTasks.contains(topoMasterId)) {
            ResourceWorkerSlot worker = existingAssignment.getWorkerByTaskId(topoMasterId);
            deadTasks.addAll(worker.getTasks());

            Set<Integer> tempSet = new HashSet<Integer>(allTaskIds);
            tempSet.removeAll(deadTasks);
            aliveTasks.addAll(tempSet);
            aliveTasks.removeAll(deadTasks);
        } else {
            deadTasks.addAll(allTaskIds);
            deadTasks.removeAll(aliveTasks);
        }
        unstoppedTasks = getUnstoppedSlots(aliveTasks, supInfos, existingAssignment);
    }
    ret.setDeadTaskIds(deadTasks);
    ret.setUnstoppedTaskIds(unstoppedTasks);

    // Step 2: collect all slot resources: free slots, alive slots, and unstopped slots
    getFreeSlots(supInfos, stormClusterState);
    ret.setCluster(supInfos);

    if (existingAssignment == null) {
        ret.setAssignType(TopologyAssignContext.ASSIGN_TYPE_NEW);
        try {
            AssignmentBak lastAssignment = stormClusterState.assignment_bak(event.getTopologyName());
            if (lastAssignment != null) {
                ret.setOldAssignment(lastAssignment.getAssignment());
            }
        } catch (Exception e) {
            LOG.warn("Fail to get old assignment", e);
        }
    } else {
        ret.setOldAssignment(existingAssignment);
        if (event.isScratch()) {
            ret.setAssignType(TopologyAssignContext.ASSIGN_TYPE_REBALANCE);
            ret.setIsReassign(event.isReassign());
            unstoppedWorkers = getUnstoppedWorkers(unstoppedTasks, existingAssignment);
        } else {
            ret.setAssignType(TopologyAssignContext.ASSIGN_TYPE_MONITOR);
            unstoppedWorkers = getUnstoppedWorkers(aliveTasks, existingAssignment);
        }
        ret.setUnstoppedWorkers(unstoppedWorkers);
    }
    return ret;
}
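The dead/alive bookkeeping in this method is plain set arithmetic over task ids. A self-contained sketch of the normal path (topology master alive), with made-up task ids:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class DeadTaskSketch {
    public static void main(String[] args) {
        Set<Integer> allTaskIds = new HashSet<Integer>(Arrays.asList(1, 2, 3, 4, 5));
        Set<Integer> aliveTasks = new HashSet<Integer>(Arrays.asList(1, 2, 4)); // reported by heartbeats

        // deadTasks = allTaskIds \ aliveTasks, as in the else-branch above
        Set<Integer> deadTasks = new HashSet<Integer>(allTaskIds);
        deadTasks.removeAll(aliveTasks);

        System.out.println(deadTasks); // prints [3, 5] (iteration order not guaranteed)
    }
}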
Use of backtype.storm.generated.StormTopology in project jstorm by alibaba.
The class NimbusUtils, method normalizeTopology:
/**
 * Finalize each component's task parallelism.
 *
 * @param stormConf storm conf
 * @param topology storm topology
 * @param fromConf whether the parallelism is read from the conf file instead of from the topology code
 * @return normalized topology
 */
public static StormTopology normalizeTopology(Map stormConf, StormTopology topology, boolean fromConf) {
    StormTopology ret = topology.deepCopy();
    Map<String, Object> rawComponents = ThriftTopologyUtils.getComponents(topology);
    Map<String, Object> components = ThriftTopologyUtils.getComponents(ret);
    if (!rawComponents.keySet().equals(components.keySet())) {
        String errMsg = "Failed to normalize topology binary, maybe due to wrong dependency";
        LOG.info(errMsg + " raw components:" + rawComponents.keySet() + ", normalized " + components.keySet());
        throw new InvalidParameterException(errMsg);
    }

    for (Entry<String, Object> entry : components.entrySet()) {
        Object component = entry.getValue();
        String componentName = entry.getKey();
        ComponentCommon common = null;
        if (component instanceof Bolt) {
            common = ((Bolt) component).get_common();
            if (fromConf) {
                Integer paraNum = ConfigExtension.getBoltParallelism(stormConf, componentName);
                if (paraNum != null) {
                    LOG.info("Set " + componentName + " as " + paraNum);
                    common.set_parallelism_hint(paraNum);
                }
            }
        }
        if (component instanceof SpoutSpec) {
            common = ((SpoutSpec) component).get_common();
            if (fromConf) {
                Integer paraNum = ConfigExtension.getSpoutParallelism(stormConf, componentName);
                if (paraNum != null) {
                    LOG.info("Set " + componentName + " as " + paraNum);
                    common.set_parallelism_hint(paraNum);
                }
            }
        }
        if (component instanceof StateSpoutSpec) {
            common = ((StateSpoutSpec) component).get_common();
            if (fromConf) {
                Integer paraNum = ConfigExtension.getSpoutParallelism(stormConf, componentName);
                if (paraNum != null) {
                    LOG.info("Set " + componentName + " as " + paraNum);
                    common.set_parallelism_hint(paraNum);
                }
            }
        }

        Map componentMap = new HashMap();
        String jsonConfString = common.get_json_conf();
        if (jsonConfString != null) {
            componentMap.putAll((Map) JStormUtils.from_json(jsonConfString));
        }
        Integer taskNum = componentParalism(stormConf, common);
        componentMap.put(Config.TOPOLOGY_TASKS, taskNum);
        // use the final task number as the parallelism (executor) hint as well
        common.set_parallelism_hint(taskNum);
        LOG.info("Set " + componentName + " parallelism " + taskNum);
        common.set_json_conf(JStormUtils.to_json(componentMap));
    }
    return ret;
}
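For orientation, the two numbers this method reconciles come from the user's topology declaration: the parallelism hint (executors) and the optional per-component Config.TOPOLOGY_TASKS entry that componentParalism reads from the component's JSON conf. A sketch of how both are declared with the plain topology-builder API, using the stock test components from backtype.storm.testing (assumed available in this codebase):

import backtype.storm.generated.StormTopology;
import backtype.storm.testing.TestWordCounter;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class ParallelismDeclaration {
    public static StormTopology build() {
        TopologyBuilder builder = new TopologyBuilder();
        // parallelism hint only: 2 executors (and, by default, 2 tasks)
        builder.setSpout("words", new TestWordSpout(), 2);
        // hint of 4 executors, but 8 tasks; setNumTasks writes
        // Config.TOPOLOGY_TASKS into the component's JSON conf
        builder.setBolt("count", new TestWordCounter(), 4)
               .setNumTasks(8)
               .fieldsGrouping("words", new Fields("word"));
        return builder.createTopology();
    }
}

After normalizeTopology runs, that distinction collapses: the final task number is written back as both the TOPOLOGY_TASKS entry and the parallelism hint.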
Use of backtype.storm.generated.StormTopology in project storm-lib by xumingming.
The class TestingApiDemo, method testTimeout:
public void testTimeout() {
    Config daemonConfig = new Config();
    daemonConfig.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true);
    MkClusterParam mkClusterParam = new MkClusterParam();
    mkClusterParam.setDaemonConf(daemonConfig);
    Testing.withSimulatedTimeLocalCluster(mkClusterParam, new TestJob() {
        @Override
        public void run(ILocalCluster cluster) {
            AckFailMapTracker tracker = new AckFailMapTracker();
            FeederSpout feeder = createFeederSpout("field1");
            feeder.setAckFailDelegate(tracker);

            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("1", feeder);
            builder.setBolt("2", new AckEveryOtherBolt()).globalGrouping("1");
            StormTopology topology = builder.createTopology();

            Config topologyConfig = new Config();
            topologyConfig.setMessageTimeoutSecs(10);

            // submit the topology to the simulated-time local cluster
            try {
                cluster.submitTopology("timeout-tester", topologyConfig, topology);
            } catch (AlreadyAliveException e) {
                throw new RuntimeException(e);
            } catch (InvalidTopologyException e) {
                throw new RuntimeException(e);
            }

            feeder.feed(new Values("a"), 1);
            feeder.feed(new Values("b"), 2);
            feeder.feed(new Values("c"), 3);

            // after 9 simulated seconds, tuples 1 and 3 have been acked by the
            // bolt, and tuple 2 has not yet hit the 10-second message timeout
            Testing.advanceClusterTime(cluster, 9);
            assertAcked(tracker, 1, 3);
            assertFalse(tracker.isFailed(2));
            // advancing 12 more seconds pushes tuple 2 past the timeout, so it fails
            Testing.advanceClusterTime(cluster, 12);
            assertFailed(tracker, 2);
        }
    });
}
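AckEveryOtherBolt is not shown on this page, but the assertions pin down its contract: it acks every other tuple it receives, so tuples 1 and 3 complete while tuple 2 is left to hit the message timeout. A minimal sketch of such a bolt (a hypothetical reconstruction, not the original source):

import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

public class AckEveryOtherBolt extends BaseRichBolt {
    private OutputCollector collector;
    private boolean ack = true; // ack the 1st tuple, skip the 2nd, ack the 3rd, ...

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        if (ack) {
            collector.ack(tuple); // acked tuples complete and are reported to the tracker
        }
        // skipped tuples are neither acked nor failed, so they time out
        ack = !ack;
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output stream; this bolt only acks
    }
}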