Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
Class Nimbus, method debug:
@Override
public void debug(String topoName, String componentId, boolean enable, double samplingPercentage) throws NotAliveException, AuthorizationException, TException {
    debugCalls.mark();
    try {
        IStormClusterState state = stormClusterState;
        String topoId = toTopoId(topoName);
        Map<String, Object> topoConf = tryReadTopoConf(topoId, blobStore);
        // make sure samplingPct is within bounds.
        double spct = Math.max(Math.min(samplingPercentage, 100.0), 0.0);
        // while disabling we retain the sampling pct.
        checkAuthorization(topoName, topoConf, "debug");
        if (topoId == null) {
            throw new NotAliveException(topoName);
        }
        boolean hasCompId = componentId != null && !componentId.isEmpty();
        DebugOptions options = new DebugOptions();
        options.set_enable(enable);
        if (enable) {
            options.set_samplingpct(spct);
        }
        StormBase updates = new StormBase();
        // For backwards compatibility
        updates.set_component_executors(Collections.emptyMap());
        String key = hasCompId ? componentId : topoId;
        updates.put_to_component_debug(key, options);
        LOG.info("Nimbus setting debug to {} for storm-name '{}' storm-id '{}' sampling pct '{}'" + (hasCompId ? " component-id '" + componentId + "'" : ""), enable, topoName, topoId, spct);
        synchronized (submitLock) {
            state.updateStorm(topoId, updates);
        }
    } catch (Exception e) {
        LOG.warn("debug topology exception. (topology name='{}')", topoName, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
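The pattern worth noting is that Nimbus never rewrites the whole StormBase: it builds a sparse StormBase carrying only the debug delta and hands it to IStormClusterState.updateStorm under the submit lock. Below is a minimal sketch of that same call sequence, assuming the caller already holds an IStormClusterState instance and a lock object; the class and method names (DebugToggle, toggleDebug) are illustrative, not part of Storm.

import java.util.Collections;

import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.DebugOptions;
import org.apache.storm.generated.StormBase;

public class DebugToggle {
    /**
     * Enables or disables event sampling for one component (or for the whole
     * topology when componentId is null), clamping the percentage to [0, 100].
     */
    public static void toggleDebug(IStormClusterState state, Object submitLock,
                                   String topoId, String componentId,
                                   boolean enable, double samplingPct) {
        DebugOptions options = new DebugOptions();
        options.set_enable(enable);
        if (enable) {
            options.set_samplingpct(Math.max(Math.min(samplingPct, 100.0), 0.0));
        }
        StormBase delta = new StormBase();
        // Mirrors the original: an empty executor map is set for backwards compatibility,
        // and only the debug options for this key are carried in the update.
        delta.set_component_executors(Collections.emptyMap());
        delta.put_to_component_debug(componentId != null ? componentId : topoId, options);
        synchronized (submitLock) {
            state.updateStorm(topoId, delta);
        }
    }
}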
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
Class Nimbus, method getResourcesForTopology:
private TopologyResources getResourcesForTopology(String topoId, StormBase base) throws NotAliveException, AuthorizationException, InvalidTopologyException, IOException {
    TopologyResources ret = idToResources.get().get(topoId);
    if (ret == null) {
        try {
            IStormClusterState state = stormClusterState;
            TopologyDetails details = readTopologyDetails(topoId, base);
            double sumOnHeap = 0.0;
            double sumOffHeap = 0.0;
            double sumCPU = 0.0;
            Assignment assignment = state.assignmentInfo(topoId, null);
            if (assignment != null) {
                if (assignment.is_set_worker_resources()) {
                    for (WorkerResources wr : assignment.get_worker_resources().values()) {
                        if (wr.is_set_cpu()) {
                            sumCPU += wr.get_cpu();
                        }
                        if (wr.is_set_mem_off_heap()) {
                            sumOffHeap += wr.get_mem_off_heap();
                        }
                        if (wr.is_set_mem_on_heap()) {
                            sumOnHeap += wr.get_mem_on_heap();
                        }
                    }
                }
            }
            ret = new TopologyResources(details.getTotalRequestedMemOnHeap(), details.getTotalRequestedMemOffHeap(), details.getTotalRequestedCpu(), sumOnHeap, sumOffHeap, sumCPU);
        } catch (KeyNotFoundException e) {
            // This can happen when a topology is first coming up.
            // It's thrown by the blobstore code.
            LOG.error("Failed to get topology details", e);
            ret = new TopologyResources(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
        }
    }
    return ret;
}
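The IStormClusterState call that matters here is assignmentInfo(topoId, null), which returns the topology's current Assignment (or null) without registering a watch; the per-worker WorkerResources entries are then summed. A standalone sketch of that read follows, assuming only an already-built IStormClusterState; the helper name and the double[] return shape are illustrative.

import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.Assignment;
import org.apache.storm.generated.WorkerResources;

public class AssignedResources {
    /** Returns {onHeapMB, offHeapMB, cpu} currently assigned to the topology, or zeros if unassigned. */
    public static double[] sumAssignedResources(IStormClusterState state, String topoId) {
        double onHeap = 0.0, offHeap = 0.0, cpu = 0.0;
        Assignment assignment = state.assignmentInfo(topoId, null); // null: no watch callback
        if (assignment != null && assignment.is_set_worker_resources()) {
            for (WorkerResources wr : assignment.get_worker_resources().values()) {
                if (wr.is_set_mem_on_heap()) {
                    onHeap += wr.get_mem_on_heap();
                }
                if (wr.is_set_mem_off_heap()) {
                    offHeap += wr.get_mem_off_heap();
                }
                if (wr.is_set_cpu()) {
                    cpu += wr.get_cpu();
                }
            }
        }
        return new double[] { onHeap, offHeap, cpu };
    }
}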
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
Class Nimbus, method getCommonTopoInfo:
private CommonTopoInfo getCommonTopoInfo(String topoId, String operation) throws NotAliveException, AuthorizationException, IOException, InvalidTopologyException {
    BlobStore store = blobStore;
    IStormClusterState state = stormClusterState;
    CommonTopoInfo ret = new CommonTopoInfo();
    ret.topoConf = tryReadTopoConf(topoId, store);
    ret.topoName = (String) ret.topoConf.get(Config.TOPOLOGY_NAME);
    checkAuthorization(ret.topoName, ret.topoConf, operation);
    ret.topology = tryReadTopology(topoId, store);
    ret.taskToComponent = StormCommon.stormTaskInfo(ret.topology, ret.topoConf);
    ret.base = state.stormBase(topoId, null);
    if (ret.base != null && ret.base.is_set_launch_time_secs()) {
        ret.launchTimeSecs = ret.base.get_launch_time_secs();
    } else {
        ret.launchTimeSecs = 0;
    }
    ret.assignment = state.assignmentInfo(topoId, null);
    ret.beats = OR(heartbeatsCache.get().get(topoId), Collections.<List<Integer>, Map<String, Object>>emptyMap());
    ret.allComponents = new HashSet<>(ret.taskToComponent.values());
    return ret;
}
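Here IStormClusterState supplies the ZooKeeper-backed half of the picture: stormBase(topoId, null) for launch time and assignmentInfo(topoId, null) for the current assignment, while the conf and topology come from the blobstore. A minimal sketch of just those two reads, with an illustrative class and method name:

import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.Assignment;
import org.apache.storm.generated.StormBase;

public class TopoStateReads {
    /** Prints the launch time (seconds) and whether the topology currently has an assignment. */
    public static void describe(IStormClusterState state, String topoId) {
        StormBase base = state.stormBase(topoId, null); // null when the topology is not active
        int launchTimeSecs = (base != null && base.is_set_launch_time_secs())
                ? base.get_launch_time_secs() : 0;
        Assignment assignment = state.assignmentInfo(topoId, null); // null when nothing is scheduled
        System.out.println(topoId + " launched at " + launchTimeSecs
                + "s, assigned=" + (assignment != null));
    }
}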
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
Class Nimbus, method getTopologyHistory:
@SuppressWarnings("unchecked")
@Override
public TopologyHistoryInfo getTopologyHistory(String user) throws AuthorizationException, TException {
    try {
        List<String> adminUsers = (List<String>) conf.getOrDefault(Config.NIMBUS_ADMINS, Collections.emptyList());
        IStormClusterState state = stormClusterState;
        BlobStore store = blobStore;
        List<String> assignedIds = state.assignments(null);
        Set<String> ret = new HashSet<>();
        boolean isAdmin = adminUsers.contains(user);
        for (String topoId : assignedIds) {
            Map<String, Object> topoConf = tryReadTopoConf(topoId, store);
            List<String> groups = ConfigUtils.getTopoLogsGroups(topoConf);
            List<String> topoLogUsers = ConfigUtils.getTopoLogsUsers(topoConf);
            if (user == null || isAdmin || isUserPartOf(user, groups) || topoLogUsers.contains(user)) {
                ret.add(topoId);
            }
        }
        ret.addAll(readTopologyHistory(user, adminUsers));
        return new TopologyHistoryInfo(new ArrayList<>(ret));
    } catch (Exception e) {
        LOG.warn("Get topology history. (user='{}')", user, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
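Here assignments(null) lists every topology id that currently has an assignment; the method then filters them by the caller's log-access rights and merges in on-disk history. The sketch below covers only the listing-and-filtering step, with the ACL checks (admins, topology.logs.users, topology.logs.groups) replaced by a caller-supplied predicate; the class name and predicate are stand-ins, not Storm APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

import org.apache.storm.cluster.IStormClusterState;

public class VisibleTopologies {
    /** Returns the assigned topology ids that the given visibility predicate allows. */
    public static List<String> visibleAssignedIds(IStormClusterState state, Predicate<String> canSee) {
        List<String> visible = new ArrayList<>();
        for (String topoId : state.assignments(null)) { // null: no watch callback
            if (canSee.test(topoId)) {
                visible.add(topoId);
            }
        }
        return visible;
    }
}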
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
Class Nimbus, method launchServer:
@VisibleForTesting
public void launchServer() throws Exception {
    try {
        BlobStore store = blobStore;
        IStormClusterState state = stormClusterState;
        NimbusInfo hpi = nimbusHostPortInfo;
        LOG.info("Starting Nimbus with conf {}", conf);
        validator.prepare(conf);
        // add to nimbuses
        state.addNimbusHost(hpi.getHost(), new NimbusSummary(hpi.getHost(), hpi.getPort(), Time.currentTimeSecs(), false, STORM_VERSION));
        leaderElector.addToLeaderLockQueue();
        if (store instanceof LocalFsBlobStore) {
            // register callback for blob-store
            state.blobstore(() -> {
                try {
                    blobSync();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            setupBlobstore();
        }
        for (ClusterMetricsConsumerExecutor exec : clusterConsumerExceutors) {
            exec.prepare();
        }
        if (isLeader()) {
            for (String topoId : state.activeStorms()) {
                transition(topoId, TopologyActions.STARTUP, null);
            }
        }
        final boolean doNotReassign = (Boolean) conf.getOrDefault(ConfigUtils.NIMBUS_DO_NOT_REASSIGN, false);
        timer.scheduleRecurring(0, Utils.getInt(conf.get(Config.NIMBUS_MONITOR_FREQ_SECS)), () -> {
            try {
                if (!doNotReassign) {
                    mkAssignments();
                }
                doCleanup();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        // Schedule Nimbus inbox cleaner
        final int jarExpSecs = Utils.getInt(conf.get(Config.NIMBUS_INBOX_JAR_EXPIRATION_SECS));
        timer.scheduleRecurring(0, Utils.getInt(conf.get(Config.NIMBUS_CLEANUP_INBOX_FREQ_SECS)), () -> {
            try {
                cleanInbox(getInbox(), jarExpSecs);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        // Schedule nimbus code sync thread to sync code from other nimbuses.
        if (store instanceof LocalFsBlobStore) {
            timer.scheduleRecurring(0, Utils.getInt(conf.get(Config.NIMBUS_CODE_SYNC_FREQ_SECS)), () -> {
                try {
                    blobSync();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
        }
        // Schedule topology history cleaner
        Integer interval = Utils.getInt(conf.get(Config.LOGVIEWER_CLEANUP_INTERVAL_SECS), null);
        if (interval != null) {
            final int lvCleanupAgeMins = Utils.getInt(conf.get(Config.LOGVIEWER_CLEANUP_AGE_MINS));
            timer.scheduleRecurring(0, interval, () -> {
                try {
                    cleanTopologyHistory(lvCleanupAgeMins);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
        }
        timer.scheduleRecurring(0, Utils.getInt(conf.get(Config.NIMBUS_CREDENTIAL_RENEW_FREQ_SECS)), () -> {
            try {
                renewCredentials();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        StormMetricsRegistry.registerGauge("nimbus:num-supervisors", () -> state.supervisors(null));
        StormMetricsRegistry.startMetricsReporters(conf);
        if (clusterConsumerExceutors != null) {
            timer.scheduleRecurring(0, Utils.getInt(conf.get(Config.STORM_CLUSTER_METRICS_CONSUMER_PUBLISH_INTERVAL_SECS)), () -> {
                try {
                    if (isLeader()) {
                        sendClusterMetricsToExecutors();
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
        }
    } catch (Exception e) {
        if (Utils.exceptionCauseIsInstanceOf(InterruptedException.class, e)) {
            throw e;
        }
        if (Utils.exceptionCauseIsInstanceOf(InterruptedIOException.class, e)) {
            throw e;
        }
        LOG.error("Error on initialization of nimbus", e);
        Utils.exitProcess(13, "Error on initialization of nimbus");
    }
}
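Among everything launchServer wires up, the direct IStormClusterState interactions are registering this nimbus via addNimbusHost and, when a LocalFsBlobStore is used, installing a blob-sync callback via blobstore(...). A reduced sketch of just those calls, assuming the host, port, version string, and sync runnable are supplied by the caller; the class and method names are illustrative.

import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.NimbusSummary;
import org.apache.storm.utils.Time;

public class NimbusStartupSketch {
    public static void register(IStormClusterState state, String host, int port,
                                String stormVersion, Runnable blobSync) {
        // Announce this nimbus instance so it appears in cluster summaries.
        state.addNimbusHost(host, new NimbusSummary(host, port, Time.currentTimeSecs(), false, stormVersion));
        // Ask the cluster state to invoke the callback whenever blobstore keys change.
        state.blobstore(blobSync);
    }
}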