Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
The class Nimbus, method doCleanup:
@VisibleForTesting
public void doCleanup() throws Exception {
    if (!isLeader()) {
        LOG.info("not a leader, skipping cleanup");
        return;
    }
    IStormClusterState state = stormClusterState;
    Set<String> toClean;
    synchronized (submitLock) {
        toClean = topoIdsToClean(state, blobStore);
    }
    if (toClean != null) {
        for (String topoId : toClean) {
            LOG.info("Cleaning up {}", topoId);
            state.teardownHeartbeats(topoId);
            state.teardownTopologyErrors(topoId);
            state.removeBackpressure(topoId);
            rmDependencyJarsInTopology(topoId);
            forceDeleteTopoDistDir(topoId);
            rmTopologyKeys(topoId);
            heartbeatsCache.getAndUpdate(new Dissoc<>(topoId));
        }
    }
}
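The per-topology teardown above touches three pieces of IStormClusterState. A minimal sketch, assuming Mockito is available on the test classpath (the harness below is hypothetical, not Storm's own test code), showing those same calls verified against a mocked state handle:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.apache.storm.cluster.IStormClusterState;

public class CleanupStateSketch {
    public static void main(String[] args) {
        IStormClusterState state = mock(IStormClusterState.class);
        String topoId = "wordcount-1-1500000000"; // hypothetical topology id

        // the same per-topology teardown sequence doCleanup() performs
        state.teardownHeartbeats(topoId);
        state.teardownTopologyErrors(topoId);
        state.removeBackpressure(topoId);

        verify(state).teardownHeartbeats(topoId);
        verify(state).teardownTopologyErrors(topoId);
        verify(state).removeBackpressure(topoId);
    }
}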
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
The class Nimbus, method submitTopologyWithOpts:
@Override
public void submitTopologyWithOpts(String topoName, String uploadedJarLocation, String jsonConf, StormTopology topology,
        SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
    try {
        submitTopologyWithOptsCalls.mark();
        assertIsLeader();
        assert (options != null);
        validateTopologyName(topoName);
        checkAuthorization(topoName, null, "submitTopology");
        assertTopoActive(topoName, false);
        @SuppressWarnings("unchecked")
        Map<String, Object> topoConf = (Map<String, Object>) JSONValue.parse(jsonConf);
        try {
            ConfigValidation.validateFields(topoConf);
        } catch (IllegalArgumentException ex) {
            throw new InvalidTopologyException(ex.getMessage());
        }
        validator.validate(topoName, topoConf, topology);
        Utils.validateTopologyBlobStoreMap(topoConf, Sets.newHashSet(blobStore.listKeys()));
        long uniqueNum = submittedCount.incrementAndGet();
        String topoId = topoName + "-" + uniqueNum + "-" + Time.currentTimeSecs();
        Map<String, String> creds = null;
        if (options.is_set_creds()) {
            creds = options.get_creds().get_creds();
        }
        topoConf.put(Config.STORM_ID, topoId);
        topoConf.put(Config.TOPOLOGY_NAME, topoName);
        topoConf = normalizeConf(conf, topoConf, topology);
        ReqContext req = ReqContext.context();
        Principal principal = req.principal();
        String submitterPrincipal = principal == null ? null : principal.toString();
        String submitterUser = principalToLocal.toLocal(principal);
        String systemUser = System.getProperty("user.name");
        @SuppressWarnings("unchecked")
        Set<String> topoAcl = new HashSet<>((List<String>) topoConf.getOrDefault(Config.TOPOLOGY_USERS, Collections.emptyList()));
        topoAcl.add(submitterPrincipal);
        topoAcl.add(submitterUser);
        topoConf.put(Config.TOPOLOGY_SUBMITTER_PRINCIPAL, OR(submitterPrincipal, ""));
        //Don't let the user set who we launch as
        topoConf.put(Config.TOPOLOGY_SUBMITTER_USER, OR(submitterUser, systemUser));
        topoConf.put(Config.TOPOLOGY_USERS, new ArrayList<>(topoAcl));
        topoConf.put(Config.STORM_ZOOKEEPER_SUPERACL, conf.get(Config.STORM_ZOOKEEPER_SUPERACL));
        if (!Utils.isZkAuthenticationConfiguredStormServer(conf)) {
            topoConf.remove(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME);
            topoConf.remove(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD);
        }
        if (!(Boolean) conf.getOrDefault(Config.STORM_TOPOLOGY_CLASSPATH_BEGINNING_ENABLED, false)) {
            topoConf.remove(Config.TOPOLOGY_CLASSPATH_BEGINNING);
        }
        Map<String, Object> totalConf = merge(conf, topoConf);
        topology = normalizeTopology(totalConf, topology);
        IStormClusterState state = stormClusterState;
        if (creds != null) {
            Map<String, Object> finalConf = Collections.unmodifiableMap(topoConf);
            for (INimbusCredentialPlugin autocred : nimbusAutocredPlugins) {
                autocred.populateCredentials(creds, finalConf);
            }
        }
        if (Utils.getBoolean(conf.get(Config.SUPERVISOR_RUN_WORKER_AS_USER), false)
                && (submitterUser == null || submitterUser.isEmpty())) {
            throw new AuthorizationException("Could not determine the user to run this topology as.");
        }
        //this validates the structure of the topology
        StormCommon.systemTopology(totalConf, topology);
        validateTopologySize(topoConf, conf, topology);
        if (Utils.isZkAuthenticationConfiguredStormServer(conf) && !Utils.isZkAuthenticationConfiguredTopology(topoConf)) {
            throw new IllegalArgumentException("The cluster is configured for zookeeper authentication, but no payload was provided.");
        }
        LOG.info("Received topology submission for {} with conf {}", topoName,
                Utils.redactValue(topoConf, Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD));
        // the lock protects against the cleanup thread killing the topology between assignment and startup
        synchronized (submitLock) {
            assertTopoActive(topoName, false);
            //cred-update-lock is not needed here because creds are being added for the first time.
            if (creds != null) {
                state.setCredentials(topoId, new Credentials(creds), topoConf);
            }
            LOG.info("uploadedJar {}", uploadedJarLocation);
            setupStormCode(conf, topoId, uploadedJarLocation, totalConf, topology);
            waitForDesiredCodeReplication(totalConf, topoId);
            state.setupHeatbeats(topoId); // (sic: spelled this way in the IStormClusterState API)
            if (Utils.getBoolean(totalConf.get(Config.TOPOLOGY_BACKPRESSURE_ENABLE), false)) {
                state.setupBackpressure(topoId);
            }
            notifyTopologyActionListener(topoName, "submitTopology");
            TopologyStatus status = null;
            switch (options.get_initial_status()) {
                case INACTIVE:
                    status = TopologyStatus.INACTIVE;
                    break;
                case ACTIVE:
                    status = TopologyStatus.ACTIVE;
                    break;
                default:
                    throw new IllegalArgumentException("Initial Status of " + options.get_initial_status() + " is not allowed.");
            }
            startTopology(topoName, topoId, status);
        }
    } catch (Exception e) {
        LOG.warn("Topology submission exception. (topology name='{}')", topoName, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
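The SubmitOptions consumed above come from the submitting client. A minimal client-side sketch using the public StormSubmitter API, assuming a spout class MySpout exists (hypothetical) and illustrative names throughout, that submits a topology in the INACTIVE initial state:

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.SubmitOptions;
import org.apache.storm.generated.TopologyInitialStatus;
import org.apache.storm.topology.TopologyBuilder;

public class SubmitSketch {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new MySpout()); // MySpout: hypothetical BaseRichSpout

        Config conf = new Config();
        conf.setNumWorkers(2);

        // INACTIVE maps to TopologyStatus.INACTIVE in the switch above;
        // any other initial status hits the IllegalArgumentException branch.
        SubmitOptions opts = new SubmitOptions(TopologyInitialStatus.INACTIVE);
        StormSubmitter.submitTopology("demo-topo", conf, builder.createTopology(), opts);
    }
}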
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
The class Nimbus, method getSupervisorPageInfo:
@Override
public SupervisorPageInfo getSupervisorPageInfo(String superId, String host, boolean includeSys)
        throws NotAliveException, AuthorizationException, TException {
    try {
        getSupervisorPageInfoCalls.mark();
        IStormClusterState state = stormClusterState;
        Map<String, SupervisorInfo> superInfos = state.allSupervisorInfo();
        Map<String, List<String>> hostToSuperId = new HashMap<>();
        for (Entry<String, SupervisorInfo> entry : superInfos.entrySet()) {
            String h = entry.getValue().get_hostname();
            List<String> superIds = hostToSuperId.get(h);
            if (superIds == null) {
                superIds = new ArrayList<>();
                hostToSuperId.put(h, superIds);
            }
            superIds.add(entry.getKey());
        }
        List<String> supervisorIds = null;
        if (superId == null) {
            supervisorIds = hostToSuperId.get(host);
        } else {
            supervisorIds = Arrays.asList(superId);
        }
        SupervisorPageInfo pageInfo = new SupervisorPageInfo();
        Map<String, Assignment> topoToAssignment = state.topologyAssignments();
        for (String sid : supervisorIds) {
            SupervisorInfo info = superInfos.get(sid);
            LOG.info("SIDL {} SI: {} ALL: {}", sid, info, superInfos);
            SupervisorSummary supSum = makeSupervisorSummary(sid, info);
            pageInfo.add_to_supervisor_summaries(supSum);
            List<String> superTopologies = topologiesOnSupervisor(topoToAssignment, sid);
            Set<String> userTopologies = filterAuthorized("getTopology", superTopologies);
            for (String topoId : superTopologies) {
                CommonTopoInfo common = getCommonTopoInfo(topoId, "getSupervisorPageInfo");
                String topoName = common.topoName;
                Assignment assignment = common.assignment;
                Map<List<Integer>, Map<String, Object>> beats = common.beats;
                Map<Integer, String> taskToComp = common.taskToComponent;
                Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
                Map<String, String> nodeToHost;
                if (assignment != null) {
                    Map<List<Long>, NodeInfo> execToNodeInfo = assignment.get_executor_node_port();
                    for (Entry<List<Long>, NodeInfo> entry : execToNodeInfo.entrySet()) {
                        NodeInfo ni = entry.getValue();
                        List<Object> nodePort = Arrays.asList(ni.get_node(), ni.get_port_iterator().next());
                        exec2NodePort.put(entry.getKey(), nodePort);
                    }
                    nodeToHost = assignment.get_node_host();
                } else {
                    nodeToHost = Collections.emptyMap();
                }
                Map<WorkerSlot, WorkerResources> workerResources = getWorkerResourcesForTopology(topoId);
                boolean isAllowed = userTopologies.contains(topoId);
                for (WorkerSummary workerSummary : StatsUtil.aggWorkerStats(topoId, topoName, taskToComp, beats, exec2NodePort,
                        nodeToHost, workerResources, includeSys, isAllowed, sid)) {
                    pageInfo.add_to_worker_summaries(workerSummary);
                }
            }
        }
        return pageInfo;
    } catch (Exception e) {
        LOG.warn("Get super page info exception. (super id='{}')", superId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
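The same RPC is reachable from any Thrift client. A minimal sketch, assuming the cluster address is resolvable from the local storm config and using an illustrative supervisor id, that fetches the page and prints each supervisor summary:

import java.util.Map;

import org.apache.storm.generated.SupervisorPageInfo;
import org.apache.storm.generated.SupervisorSummary;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class SupervisorPageSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
        try {
            // pass a supervisor id, or null plus a host name to select by host
            SupervisorPageInfo page = nimbus.getClient().getSupervisorPageInfo("super-id-1", null, false);
            for (SupervisorSummary s : page.get_supervisor_summaries()) {
                System.out.println(s.get_host() + " slots=" + s.get_num_workers());
            }
        } finally {
            nimbus.close();
        }
    }
}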
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
The class Nimbus, method tryReadTopoConfFromName:
private Map<String, Object> tryReadTopoConfFromName(final String topoName) throws NotAliveException, AuthorizationException, IOException {
    IStormClusterState state = stormClusterState;
    String topoId = state.getTopoId(topoName)
            .orElseThrow(() -> new NotAliveException(topoName + " is not alive"));
    return tryReadTopoConf(topoId, blobStore);
}
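getTopoId returns an Optional&lt;String&gt;, so a missing topology surfaces as a NotAliveException rather than a null check. A minimal sketch of the same name-to-id pattern against a mocked state (the Mockito setup and values are hypothetical):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Optional;

import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.NotAliveException;

public class TopoIdLookupSketch {
    public static void main(String[] args) throws Exception {
        IStormClusterState state = mock(IStormClusterState.class);
        when(state.getTopoId("wordcount")).thenReturn(Optional.of("wordcount-1-1500000000"));

        // absent name -> Optional.empty() -> NotAliveException, as in the method above
        String topoId = state.getTopoId("wordcount")
                .orElseThrow(() -> new NotAliveException("wordcount is not alive"));
        System.out.println(topoId); // wordcount-1-1500000000
    }
}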
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
The class Nimbus, method getTopologyPageInfo:
@Override
public TopologyPageInfo getTopologyPageInfo(String topoId, String window, boolean includeSys)
        throws NotAliveException, AuthorizationException, TException {
    try {
        getTopologyPageInfoCalls.mark();
        CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyPageInfo");
        String topoName = common.topoName;
        IStormClusterState state = stormClusterState;
        int launchTimeSecs = common.launchTimeSecs;
        Assignment assignment = common.assignment;
        Map<List<Integer>, Map<String, Object>> beats = common.beats;
        Map<Integer, String> taskToComp = common.taskToComponent;
        StormTopology topology = common.topology;
        Map<String, Object> topoConf = common.topoConf;
        StormBase base = common.base;
        if (base == null) {
            throw new NotAliveException(topoId);
        }
        Map<WorkerSlot, WorkerResources> workerToResources = getWorkerResourcesForTopology(topoId);
        List<WorkerSummary> workerSummaries = null;
        Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
        if (assignment != null) {
            Map<List<Long>, NodeInfo> execToNodeInfo = assignment.get_executor_node_port();
            Map<String, String> nodeToHost = assignment.get_node_host();
            for (Entry<List<Long>, NodeInfo> entry : execToNodeInfo.entrySet()) {
                NodeInfo ni = entry.getValue();
                List<Object> nodePort = Arrays.asList(ni.get_node(), ni.get_port_iterator().next());
                exec2NodePort.put(entry.getKey(), nodePort);
            }
            //this is the topology page, so we know the user is authorized
            workerSummaries = StatsUtil.aggWorkerStats(topoId, topoName, taskToComp, beats, exec2NodePort,
                    nodeToHost, workerToResources, includeSys, true);
        }
        TopologyPageInfo topoPageInfo = StatsUtil.aggTopoExecsStats(topoId, exec2NodePort, taskToComp, beats,
                topology, window, includeSys, state);
        Map<String, Map<String, Double>> spoutResources = ResourceUtils.getSpoutsResources(topology, topoConf);
        for (Entry<String, ComponentAggregateStats> entry : topoPageInfo.get_id_to_spout_agg_stats().entrySet()) {
            CommonAggregateStats commonStats = entry.getValue().get_common_stats();
            commonStats.set_resources_map(setResourcesDefaultIfNotSet(spoutResources, entry.getKey(), topoConf));
        }
        Map<String, Map<String, Double>> boltResources = ResourceUtils.getBoltsResources(topology, topoConf);
        for (Entry<String, ComponentAggregateStats> entry : topoPageInfo.get_id_to_bolt_agg_stats().entrySet()) {
            CommonAggregateStats commonStats = entry.getValue().get_common_stats();
            commonStats.set_resources_map(setResourcesDefaultIfNotSet(boltResources, entry.getKey(), topoConf));
        }
        if (workerSummaries != null) {
            topoPageInfo.set_workers(workerSummaries);
        }
        if (base.is_set_owner()) {
            topoPageInfo.set_owner(base.get_owner());
        }
        String schedStatus = idToSchedStatus.get().get(topoId);
        if (schedStatus != null) {
            topoPageInfo.set_sched_status(schedStatus);
        }
        TopologyResources resources = getResourcesForTopology(topoId, base);
        if (resources != null) {
            topoPageInfo.set_requested_memonheap(resources.getRequestedMemOnHeap());
            topoPageInfo.set_requested_memoffheap(resources.getRequestedMemOffHeap());
            topoPageInfo.set_requested_cpu(resources.getRequestedCpu());
            topoPageInfo.set_assigned_memonheap(resources.getAssignedMemOnHeap());
            topoPageInfo.set_assigned_memoffheap(resources.getAssignedMemOffHeap());
            topoPageInfo.set_assigned_cpu(resources.getAssignedCpu());
        }
        topoPageInfo.set_name(topoName);
        topoPageInfo.set_status(extractStatusStr(base));
        topoPageInfo.set_uptime_secs(Time.deltaSecs(launchTimeSecs));
        topoPageInfo.set_topology_conf(JSONValue.toJSONString(topoConf));
        topoPageInfo.set_replication_count(getBlobReplicationCount(ConfigUtils.masterStormCodeKey(topoId)));
        if (base.is_set_component_debug()) {
            DebugOptions debug = base.get_component_debug().get(topoId);
            if (debug != null) {
                topoPageInfo.set_debug_options(debug);
            }
        }
        return topoPageInfo;
    } catch (Exception e) {
        LOG.warn("Get topo page info exception. (topology id='{}')", topoId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
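The fields populated above are what Storm's UI reads back over Thrift. A minimal client-side sketch, assuming ":all-time" as the full-history stats window and an illustrative topology id, that fetches the page and prints a few of those fields:

import java.util.Map;

import org.apache.storm.generated.TopologyPageInfo;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class TopologyPageSketch {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
        try {
            TopologyPageInfo page = nimbus.getClient().getTopologyPageInfo("wordcount-1-1500000000", ":all-time", false);
            // name, status, and uptime are set near the end of the method above
            System.out.println(page.get_name() + " is " + page.get_status()
                    + " for " + page.get_uptime_secs() + "s");
        } finally {
            nimbus.close();
        }
    }
}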