use of com.alibaba.jstorm.cluster.StormClusterState in project jstorm by alibaba.
the class DoRebalanceTransitionCallback method execute.
@Override
public <T> Object execute(T... args) {
    boolean isSetTaskInfo = false;
    try {
        // args[0] is not referenced in this method; args[1] is the reassign flag
        Boolean reassign = (Boolean) args[1];
        // args[2] carries the new (possibly partial) topology conf
        Map<Object, Object> conf = (Map<Object, Object>) args[2];
        if (conf != null) {
            boolean isConfUpdate = false;
            Map stormConf = data.getConf();
            // Update topology code
            Map topoConf = StormConfig.read_nimbus_topology_conf(topologyid, data.getBlobStore());
            StormTopology rawOldTopology = StormConfig.read_nimbus_topology_code(topologyid, data.getBlobStore());
            StormTopology rawNewTopology = NimbusUtils.normalizeTopology(conf, rawOldTopology, true);
            StormTopology sysOldTopology = rawOldTopology.deepCopy();
            StormTopology sysNewTopology = rawNewTopology.deepCopy();
            if (conf.get(Config.TOPOLOGY_ACKER_EXECUTORS) != null) {
                Common.add_acker(topoConf, sysOldTopology);
                Common.add_acker(conf, sysNewTopology);
                int ackerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
                int oldAckerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
                LOG.info("Update acker from oldAckerNum=" + oldAckerNum + " to ackerNum=" + ackerNum);
                topoConf.put(Config.TOPOLOGY_ACKER_EXECUTORS, ackerNum);
                isConfUpdate = true;
            }
            // If scale-out, setup task info for new added tasks
            setTaskInfo(sysOldTopology, sysNewTopology);
            isSetTaskInfo = true;
            // If everything is OK, write topology code into disk
            StormConfig.write_nimbus_topology_code(topologyid, Utils.serialize(rawNewTopology), data);
            // Update topology conf if worker num has been updated
            Set<Object> keys = conf.keySet();
            Integer workerNum = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_WORKERS));
            if (workerNum != null) {
                Integer oldWorkerNum = JStormUtils.parseInt(topoConf.get(Config.TOPOLOGY_WORKERS));
                topoConf.put(Config.TOPOLOGY_WORKERS, workerNum);
                isConfUpdate = true;
                LOG.info("Update worker num from " + oldWorkerNum + " to " + workerNum);
            }
            if (keys.contains(Config.ISOLATION_SCHEDULER_MACHINES)) {
                topoConf.put(Config.ISOLATION_SCHEDULER_MACHINES, conf.get(Config.ISOLATION_SCHEDULER_MACHINES));
            }
            if (isConfUpdate) {
                StormConfig.write_nimbus_topology_conf(topologyid, topoConf, data);
            }
        }
        TopologyAssignEvent event = new TopologyAssignEvent();
        event.setTopologyId(topologyid);
        event.setScratch(true);
        event.setOldStatus(oldStatus);
        event.setReassign(reassign);
        if (conf != null) {
            event.setScaleTopology(true);
        }
        TopologyAssign.push(event);
        event.waitFinish();
    } catch (Exception e) {
        LOG.error("do-rebalance error!", e);
        // Rollback the changes on ZK
        if (isSetTaskInfo) {
            try {
                StormClusterState clusterState = data.getStormClusterState();
                clusterState.remove_task(topologyid, newTasks);
            } catch (Exception e1) {
LOG.error("Failed to rollback the changes on ZK for task-" + newTasks, e);
            }
        }
    }
    DelayStatusTransitionCallback delayCallback = new DelayStatusTransitionCallback(
            data, topologyid, oldStatus, StatusType.rebalancing, StatusType.done_rebalance);
    return delayCallback.execute();
}
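For orientation, the varargs this callback receives map as follows: args[1] is the reassign flag and args[2] is the new (possibly partial) topology conf, while args[0] is not consulted in the body above. Below is a minimal, hypothetical invocation sketch; the rebalanceCallback instance, the placeholder value for args[0], and the concrete conf keys are assumptions for illustration, and only the execute(...) layout and Config constants visible above are relied on (imports omitted, as in the snippets on this page):

// Hypothetical sketch: scale a topology to 8 workers and 2 ackers through the callback.
// `rebalanceCallback` is assumed to be a DoRebalanceTransitionCallback built by the
// nimbus status-transition machinery.
Map<Object, Object> newConf = new HashMap<Object, Object>();
newConf.put(Config.TOPOLOGY_WORKERS, 8);
newConf.put(Config.TOPOLOGY_ACKER_EXECUTORS, 2);
// args[0] is a placeholder, args[1] is the reassign flag, args[2] is the new conf
Object nextStatus = rebalanceCallback.execute(0, Boolean.TRUE, newConf);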
use of com.alibaba.jstorm.cluster.StormClusterState in project jstorm by alibaba.
the class UpdateTopologyTransitionCallback method execute.
@Override
public <T> Object execute(T... args) {
    StormClusterState clusterState = data.getStormClusterState();
    try {
        Map userConf = (Map) args[0];
        Map topoConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore());
        topoConf.putAll(userConf);
        // Persist the merged conf; without this the user conf would be dropped
        StormConfig.write_nimbus_topology_conf(topologyId, topoConf, data);
        Assignment assignment = clusterState.assignment_info(topologyId, null);
        assignment.setAssignmentType(AssignmentType.UpdateTopology);
        assignment.updateTimeStamp();
        clusterState.set_assignment(topologyId, assignment);
        LOG.info("Successfully updated topology information to ZK for " + topologyId);
    } catch (Exception e) {
        LOG.error("Failed to update topology.", e);
    }
    return currentStatus;
}
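Here args[0] is the user-supplied conf map; the callback merges it into the stored topology conf and stamps the existing Assignment with AssignmentType.UpdateTopology so running workers pick up the change. A hypothetical invocation sketch follows, assuming an already constructed updateCallback and using Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS purely as an example key:

// Hypothetical sketch: push a single updated setting through the callback.
// `updateCallback` is assumed to be an UpdateTopologyTransitionCallback for the
// target topology; only execute(args[0] = user conf) is taken from the snippet above.
Map<Object, Object> userConf = new HashMap<Object, Object>();
userConf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 60);
Object status = updateCallback.execute(userConf);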
use of com.alibaba.jstorm.cluster.StormClusterState in project jstorm by alibaba.
the class NimbusUtils method transitionName.
public static <T> void transitionName(NimbusData data, String topologyName, boolean errorOnNoTransition,
                                      StatusType transition_status, T... args) throws Exception {
    StormClusterState stormClusterState = data.getStormClusterState();
    String topologyId = Cluster.get_topology_id(stormClusterState, topologyName);
    if (topologyId == null) {
        throw new NotAliveException(topologyName);
    }
    transition(data, topologyId, errorOnNoTransition, transition_status, args);
}
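transitionName only resolves the topology name to an id via Cluster.get_topology_id on the cluster state and forwards the varargs to transition. A hypothetical call sketch follows; data is the NimbusData instance from the signature, the varargs order (delay, reassign flag, new conf) mirrors how DoRebalanceTransitionCallback.execute reads args[1] and args[2] in the first snippet, and StatusType.rebalancing is used purely as an illustrative transition value:

// Hypothetical sketch: drive a rebalance-style transition by topology name.
// The topology name, delay, and conf values are illustrative only.
Map<Object, Object> newConf = new HashMap<Object, Object>();
newConf.put(Config.TOPOLOGY_WORKERS, 12);
NimbusUtils.transitionName(data, "word-count-topology", true, StatusType.rebalancing, 30, Boolean.TRUE, newConf);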
use of com.alibaba.jstorm.cluster.StormClusterState in project jstorm by alibaba.
the class ServiceHandler method restart.
@Override
public void restart(String name, String jsonConf) throws TException, NotAliveException, InvalidTopologyException, TopologyAssignException {
    LOG.info("Begin to restart " + name + ", new configuration:" + jsonConf);
    // 1. get topologyId
    StormClusterState stormClusterState = data.getStormClusterState();
    String topologyId;
    try {
        topologyId = Cluster.get_topology_id(stormClusterState, name);
    } catch (Exception e2) {
        topologyId = null;
    }
    if (topologyId == null) {
        LOG.info("No topology of " + name);
        throw new NotAliveException("No topology of " + name);
    }
    // Restart the topology: Deactivate -> Kill -> Submit
    // 2. Deactivate
    deactivate(name);
    JStormUtils.sleepMs(5000);
    LOG.info("Deactivate " + name);
    // 3. backup old jar/configuration/topology
    StormTopology topology;
    Map topologyConf;
    String topologyCodeLocation = null;
    try {
        topology = StormConfig.read_nimbus_topology_code(topologyId, data.getBlobStore());
        topologyConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore());
        if (jsonConf != null) {
            Map<Object, Object> newConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
            topologyConf.putAll(newConf);
        }
        // copy storm files back to inbox from blob store
        String parent = StormConfig.masterInbox(conf);
        topologyCodeLocation = parent + PathUtils.SEPERATOR + topologyId;
        FileUtils.forceMkdir(new File(topologyCodeLocation));
        FileUtils.cleanDirectory(new File(topologyCodeLocation));
        copyBackToInbox(topologyId, topologyCodeLocation);
        LOG.info("Successfully read old jar/conf/topology " + name);
        notifyTopologyActionListener(name, "restart");
    } catch (Exception e) {
        LOG.error("Failed to read old jar/conf/topology", e);
        if (topologyCodeLocation != null) {
            try {
                PathUtils.rmr(topologyCodeLocation);
            } catch (IOException ignored) {
            }
        }
        throw new TException("Failed to read old jar/conf/topology ");
    }
    // 4. Kill
    // directly use remove command to kill, more stable than issue kill cmd
    RemoveTransitionCallback killCb = new RemoveTransitionCallback(data, topologyId);
    killCb.execute(new Object[0]);
    LOG.info("Successfully kill the topology " + name);
    // send metric events
    KillTopologyEvent.pushEvent(topologyId);
    // 5. submit
    try {
        submitTopology(name, topologyCodeLocation, JStormUtils.to_json(topologyConf), topology);
    } catch (AlreadyAliveException e) {
LOG.info("Failed to kill the topology" + name);
throw new TException("Failed to kill the topology" + name);
    } finally {
        try {
            PathUtils.rmr(topologyCodeLocation);
        } catch (IOException ignored) {
        }
    }
}
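Restart therefore amounts to deactivate, copy the jar/conf/code out of the blob store into the inbox, remove, and resubmit with the merged conf; a caller only provides the topology name and an optional JSON conf overlay. A hypothetical client-side sketch, assuming handler is the live ServiceHandler and that "topology.workers" is the string key behind Config.TOPOLOGY_WORKERS:

// Hypothetical sketch: restart a topology, overriding the worker count.
handler.restart("word-count-topology", "{\"topology.workers\": 8}");
// Passing null for jsonConf restarts with the stored configuration unchanged.
handler.restart("word-count-topology", null);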
use of com.alibaba.jstorm.cluster.StormClusterState in project jstorm by alibaba.
the class ServiceHandler method metricMonitor.
@Override
public void metricMonitor(String topologyName, MonitorOptions options) throws TException {
    boolean isEnable = options.is_isEnable();
    StormClusterState clusterState = data.getStormClusterState();
    try {
        String topologyId = Cluster.get_topology_id(clusterState, topologyName);
        if (null != topologyId) {
            clusterState.set_storm_monitor(topologyId, isEnable);
        } else {
            throw new NotAliveException("Failed to update metricsMonitor status as " + topologyName + " is not alive");
        }
    } catch (Exception e) {
        String errMsg = "Failed to update metricsMonitor " + topologyName;
        LOG.error(errMsg, e);
        throw new TException(e);
    }
}
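metricMonitor resolves the topology id and flips the monitor flag in ZooKeeper through set_storm_monitor. A hypothetical sketch of enabling it, assuming handler is the ServiceHandler and that set_isEnable is the Thrift-generated setter matching the is_isEnable getter used above:

// Hypothetical sketch: turn metrics monitoring on for a topology.
MonitorOptions options = new MonitorOptions();
options.set_isEnable(true); // assumed Thrift-generated setter
handler.metricMonitor("word-count-topology", options);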