Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
From the class Worker, method start():
public void start() throws Exception {
    LOG.info("Launching worker for {} on {}:{} with id {} and conf {}", topologyId, assignmentId, port, workerId, conf);
    // If ConfigUtils.isLocalMode(conf) returns false then we are in distributed mode.
    if (!ConfigUtils.isLocalMode(conf)) {
        // Distributed mode
        SysOutOverSLF4J.sendSystemOutAndErrToSLF4J();
        String pid = Utils.processPid();
        FileUtils.touch(new File(ConfigUtils.workerPidPath(conf, workerId, pid)));
        FileUtils.writeStringToFile(new File(ConfigUtils.workerArtifactsPidPath(conf, topologyId, port)), pid, Charset.forName("UTF-8"));
    }
    final Map topologyConf = ConfigUtils.overrideLoginConfigWithSystemProperty(ConfigUtils.readSupervisorStormConf(conf, topologyId));
    List<ACL> acls = Utils.getWorkerACL(topologyConf);
    IStateStorage stateStorage = ClusterUtils.mkStateStorage(conf, topologyConf, acls, new ClusterStateContext(DaemonType.WORKER));
    IStormClusterState stormClusterState = ClusterUtils.mkStormClusterState(stateStorage, acls, new ClusterStateContext());
    Credentials initialCredentials = stormClusterState.credentials(topologyId, null);
    Map<String, String> initCreds = new HashMap<>();
    if (initialCredentials != null) {
        initCreds.putAll(initialCredentials.get_creds());
    }
    autoCreds = AuthUtils.GetAutoCredentials(topologyConf);
    subject = AuthUtils.populateSubject(null, autoCreds, initCreds);
    Subject.doAs(subject, new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            workerState = new WorkerState(conf, context, topologyId, assignmentId, port, workerId, topologyConf, stateStorage, stormClusterState);
            // Heartbeat here so that the worker process dies if this fails.
            // It is important that the worker heartbeats to the supervisor ASAP so that
            // the supervisor knows the worker is running and moves on.
            doHeartBeat();
            executorsAtom = new AtomicReference<>(null);
            // Launch heartbeat threads immediately so that slow-loading tasks don't
            // cause the worker to time out to the supervisor.
            workerState.heartbeatTimer.scheduleRecurring(0, (Integer) conf.get(Config.WORKER_HEARTBEAT_FREQUENCY_SECS), () -> {
                try {
                    doHeartBeat();
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
            workerState.executorHeartbeatTimer.scheduleRecurring(0, (Integer) conf.get(Config.WORKER_HEARTBEAT_FREQUENCY_SECS), Worker.this::doExecutorHeartbeats);
            workerState.registerCallbacks();
            workerState.refreshConnections(null);
            workerState.activateWorkerWhenAllConnectionsReady();
            workerState.refreshStormActive(null);
            workerState.runWorkerStartHooks();
            List<IRunningExecutor> newExecutors = new ArrayList<IRunningExecutor>();
            for (List<Long> e : workerState.getExecutors()) {
                if (ConfigUtils.isLocalMode(topologyConf)) {
                    newExecutors.add(LocalExecutor.mkExecutor(workerState, e, initCreds).execute());
                } else {
                    newExecutors.add(Executor.mkExecutor(workerState, e, initCreds).execute());
                }
            }
            executorsAtom.set(newExecutors);
            EventHandler<Object> tupleHandler = (packets, seqId, batchEnd) -> workerState.sendTuplesToRemoteWorker((HashMap<Integer, ArrayList<TaskMessage>>) packets, seqId, batchEnd);
            // This thread publishes messages destined for remote tasks to their remote connections.
            transferThread = Utils.asyncLoop(() -> {
                workerState.transferQueue.consumeBatchWhenAvailable(tupleHandler);
                return 0L;
            });
            DisruptorBackpressureCallback disruptorBackpressureHandler = mkDisruptorBackpressureHandler(workerState);
            workerState.transferQueue.registerBackpressureCallback(disruptorBackpressureHandler);
            workerState.transferQueue.setEnableBackpressure((Boolean) topologyConf.get(Config.TOPOLOGY_BACKPRESSURE_ENABLE));
            workerState.transferQueue.setHighWaterMark(Utils.getDouble(topologyConf.get(Config.BACKPRESSURE_DISRUPTOR_HIGH_WATERMARK)));
            workerState.transferQueue.setLowWaterMark(Utils.getDouble(topologyConf.get(Config.BACKPRESSURE_DISRUPTOR_LOW_WATERMARK)));
            WorkerBackpressureCallback backpressureCallback = mkBackpressureHandler();
            backpressureThread = new WorkerBackpressureThread(workerState.backpressureTrigger, workerState, backpressureCallback);
            if ((Boolean) topologyConf.get(Config.TOPOLOGY_BACKPRESSURE_ENABLE)) {
                backpressureThread.start();
                stormClusterState.topologyBackpressure(topologyId, workerState::refreshThrottle);
                int pollingSecs = Utils.getInt(topologyConf.get(Config.TASK_BACKPRESSURE_POLL_SECS));
                workerState.refreshBackpressureTimer.scheduleRecurring(0, pollingSecs, workerState::refreshThrottle);
            }
            credentialsAtom = new AtomicReference<Credentials>(initialCredentials);
            establishLogSettingCallback();
            workerState.stormClusterState.credentials(topologyId, Worker.this::checkCredentialsChanged);
            workerState.refreshCredentialsTimer.scheduleRecurring(0, (Integer) conf.get(Config.TASK_CREDENTIALS_POLL_SECS), new Runnable() {

                @Override
                public void run() {
                    checkCredentialsChanged();
                    if ((Boolean) topologyConf.get(Config.TOPOLOGY_BACKPRESSURE_ENABLE)) {
                        checkThrottleChanged();
                    }
                }
            });
            // The jitter lets clients fetch the data at different times, avoiding a thundering herd.
            if (!(Boolean) topologyConf.get(Config.TOPOLOGY_DISABLE_LOADAWARE_MESSAGING)) {
                workerState.refreshLoadTimer.scheduleRecurringWithJitter(0, 1, 500, workerState::refreshLoad);
            }
            workerState.refreshConnectionsTimer.scheduleRecurring(0, (Integer) conf.get(Config.TASK_REFRESH_POLL_SECS), workerState::refreshConnections);
            workerState.resetLogLevelsTimer.scheduleRecurring(0, (Integer) conf.get(Config.WORKER_LOG_LEVEL_RESET_POLL_SECS), logConfigManager::resetLogLevels);
            workerState.refreshActiveTimer.scheduleRecurring(0, (Integer) conf.get(Config.TASK_REFRESH_POLL_SECS), workerState::refreshStormActive);
            LOG.info("Worker has topology config {}", Utils.redactValue(topologyConf, Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD));
            LOG.info("Worker {} for storm {} on {}:{} has finished loading", workerId, topologyId, assignmentId, port);
            return this;
        }
    });
}
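The bootstrap above follows a common pattern: build an IStateStorage, wrap it in an IStormClusterState, then call credentials() with a null callback for a one-shot read or a non-null callback to also register a watch. A minimal sketch of that read/watch pattern, assuming the cluster classes live in org.apache.storm.cluster and Credentials in org.apache.storm.generated as the calls above suggest; the null ACLs and printed output are illustrative only:

import java.util.Map;
import org.apache.storm.cluster.ClusterStateContext;
import org.apache.storm.cluster.ClusterUtils;
import org.apache.storm.cluster.IStateStorage;
import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.Credentials;

public class CredentialsReadSketch {

    public static void readAndWatch(Map conf, String topologyId) throws Exception {
        // Null ACLs for brevity; a secured cluster would pass Utils.getWorkerACL(conf).
        IStateStorage storage = ClusterUtils.mkStateStorage(conf, conf, null, new ClusterStateContext());
        IStormClusterState state = ClusterUtils.mkStormClusterState(storage, null, new ClusterStateContext());
        // A null callback is a one-shot read; a non-null callback (as Worker uses for
        // checkCredentialsChanged) also registers a watch on the credentials node.
        Credentials creds = state.credentials(topologyId, () -> System.out.println("credentials changed"));
        System.out.println(creds == null ? "no credentials yet" : creds.get_creds());
    }
}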
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
From the class Nimbus, method setWorkerProfiler():
@Override
public void setWorkerProfiler(String topoId, ProfileRequest profileRequest) throws TException {
    try {
        setWorkerProfilerCalls.mark();
        Map<String, Object> topoConf = tryReadTopoConf(topoId, blobStore);
        String topoName = (String) topoConf.get(Config.TOPOLOGY_NAME);
        checkAuthorization(topoName, topoConf, "setWorkerProfiler");
        IStormClusterState state = stormClusterState;
        state.setWorkerProfileRequest(topoId, profileRequest);
    } catch (Exception e) {
        LOG.warn("set worker profiler topology exception. (topology id='{}')", topoId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
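Nimbus only authorizes the caller here and forwards the request into cluster state via setWorkerProfileRequest. A hedged sketch of what building such a request might look like on the client side, assuming the Thrift-generated NodeInfo, ProfileAction, and ProfileRequest types in org.apache.storm.generated; the host and port values are illustrative:

import java.util.Collections;
import org.apache.storm.generated.NodeInfo;
import org.apache.storm.generated.ProfileAction;
import org.apache.storm.generated.ProfileRequest;

// Build a request for a jstack dump of the worker on host1:6700 (illustrative values).
static ProfileRequest jstackRequest() {
    NodeInfo target = new NodeInfo("host1", Collections.singleton(6700L));
    ProfileRequest request = new ProfileRequest(target, ProfileAction.JSTACK_DUMP);
    request.set_time_stamp(System.currentTimeMillis());
    return request;
}

Passing the result to setWorkerProfiler(topoId, request) persists it through state.setWorkerProfileRequest as shown above.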
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
From the class Nimbus, method getLogConfig():
@Override
public LogConfig getLogConfig(String topoId) throws TException {
    try {
        getLogConfigCalls.mark();
        Map<String, Object> topoConf = tryReadTopoConf(topoId, blobStore);
        String topoName = (String) topoConf.get(Config.TOPOLOGY_NAME);
        checkAuthorization(topoName, topoConf, "getLogConfig");
        IStormClusterState state = stormClusterState;
        LogConfig logConfig = state.topologyLogConfig(topoId, null);
        if (logConfig == null) {
            logConfig = new LogConfig();
        }
        return logConfig;
    } catch (Exception e) {
        LOG.warn("get log conf topology exception. (topology id='{}')", topoId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
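Both this method and setLogConfig below guard against a missing node by falling back to an empty LogConfig rather than returning null. That null-to-default read fits in a tiny helper; a sketch using only the topologyLogConfig API shown above (the helper name is ours):

import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.LogConfig;

// Hypothetical helper: read a topology's log config, defaulting to an
// empty LogConfig so callers never see null.
static LogConfig topologyLogConfigOrDefault(IStormClusterState state, String topoId) {
    LogConfig logConfig = state.topologyLogConfig(topoId, null);
    return logConfig != null ? logConfig : new LogConfig();
}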
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
From the class Nimbus, method startTopology():
private void startTopology(String topoName, String topoId, TopologyStatus initStatus) throws KeyNotFoundException, AuthorizationException, IOException, InvalidTopologyException {
    assert (TopologyStatus.ACTIVE == initStatus || TopologyStatus.INACTIVE == initStatus);
    IStormClusterState state = stormClusterState;
    BlobStore store = blobStore;
    Map<String, Object> topoConf = readTopoConf(topoId, store);
    StormTopology topology = StormCommon.systemTopology(topoConf, readStormTopology(topoId, store));
    Map<String, Integer> numExecutors = new HashMap<>();
    for (Entry<String, Object> entry : StormCommon.allComponents(topology).entrySet()) {
        numExecutors.put(entry.getKey(), StormCommon.numStartExecutors(entry.getValue()));
    }
    LOG.info("Activating {}: {}", topoName, topoId);
    StormBase base = new StormBase();
    base.set_name(topoName);
    base.set_launch_time_secs(Time.currentTimeSecs());
    base.set_status(initStatus);
    base.set_num_workers(Utils.getInt(topoConf.get(Config.TOPOLOGY_WORKERS), 0));
    base.set_component_executors(numExecutors);
    base.set_owner((String) topoConf.get(Config.TOPOLOGY_SUBMITTER_USER));
    base.set_component_debug(new HashMap<>());
    state.activateStorm(topoId, base);
    notifyTopologyActionListener(topoName, "activate");
}
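Once activateStorm has written the StormBase, other daemons can read it back from cluster state. A small sketch of that read side, assuming IStormClusterState exposes a stormBase(stormId, callback) accessor alongside the methods used above (a null callback again means a one-shot read, no watch):

import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.generated.StormBase;

// Verify the activation by reading the StormBase back from cluster state.
static void logTopologyStatus(IStormClusterState state, String topoId) {
    StormBase base = state.stormBase(topoId, null);
    if (base != null) {
        System.out.println(topoId + " status=" + base.get_status()
            + " workers=" + base.get_num_workers());
    }
}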
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache.
From the class Nimbus, method setLogConfig():
@Override
public void setLogConfig(String topoId, LogConfig config) throws TException {
    try {
        setLogConfigCalls.mark();
        Map<String, Object> topoConf = tryReadTopoConf(topoId, blobStore);
        String topoName = (String) topoConf.get(Config.TOPOLOGY_NAME);
        checkAuthorization(topoName, topoConf, "setLogConfig");
        IStormClusterState state = stormClusterState;
        LogConfig mergedLogConfig = state.topologyLogConfig(topoId, null);
        if (mergedLogConfig == null) {
            mergedLogConfig = new LogConfig();
        }
        Map<String, LogLevel> namedLoggers = mergedLogConfig.get_named_logger_level();
        for (LogLevel level : namedLoggers.values()) {
            level.set_action(LogLevelAction.UNCHANGED);
        }
        if (config.is_set_named_logger_level()) {
            for (Entry<String, LogLevel> entry : config.get_named_logger_level().entrySet()) {
                LogLevel logConfig = entry.getValue();
                String loggerName = entry.getKey();
                LogLevelAction action = logConfig.get_action();
                if (loggerName.isEmpty()) {
                    throw new RuntimeException("Named loggers need a valid name. Use ROOT for the root logger");
                }
                switch (action) {
                    case UPDATE:
                        setLoggerTimeouts(logConfig);
                        mergedLogConfig.put_to_named_logger_level(loggerName, logConfig);
                        break;
                    case REMOVE:
                        Map<String, LogLevel> nl = mergedLogConfig.get_named_logger_level();
                        if (nl != null) {
                            nl.remove(loggerName);
                        }
                        break;
                    default:
                        // NOOP
                        break;
                }
            }
        }
        LOG.info("Setting log config for {}:{}", topoName, mergedLogConfig);
        state.setTopologyLogConfig(topoId, mergedLogConfig);
    } catch (Exception e) {
        LOG.warn("set log config topology exception. (topology id='{}')", topoId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
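From the caller's side, the incoming LogConfig is just a map from logger names to LogLevel actions. A hedged sketch of building an UPDATE request, using the Thrift setters implied by the merge logic above; the logger name and timeout are illustrative:

import org.apache.storm.generated.LogConfig;
import org.apache.storm.generated.LogLevel;
import org.apache.storm.generated.LogLevelAction;

// Raise org.apache.storm to DEBUG, asking Nimbus to reset it after 30 seconds
// (setLoggerTimeouts above derives the reset epoch from this timeout).
static LogConfig debugFor30s() {
    LogLevel level = new LogLevel();
    level.set_action(LogLevelAction.UPDATE);
    level.set_target_log_level("DEBUG");
    level.set_reset_log_level_timeout_secs(30);
    LogConfig request = new LogConfig();
    request.put_to_named_logger_level("org.apache.storm", level);
    return request;
}

Passing the result to setLogConfig(topoId, request) merges it into the stored config as shown above.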