Usage of org.apache.storm.cluster.ClusterStateContext in the Apache Storm project:
the init method of the StormClusterStateImplTest class.
@Before
public void init() throws Exception {
    // Fresh collaborators for every test: a default cluster-state context and
    // a mocked state storage backend, wired into the implementation under test.
    context = new ClusterStateContext();
    storage = Mockito.mock(IStateStorage.class);
    state = new StormClusterStateImpl(storage, null /* acls */, context, false);
}
Usage of org.apache.storm.cluster.ClusterStateContext in the Apache Storm project:
the initialize method of the AdminCommands class.
/**
 * Loads the Storm configuration and initializes the static admin state:
 * the nimbus blob store client and the storm cluster state handle.
 *
 * @throws RuntimeException if the cluster state cannot be created.
 */
private static void initialize() {
    conf = ConfigUtils.readStormConfig();
    nimbusBlobStore = Utils.getNimbusBlobStore(conf, NimbusInfo.fromConf(conf));
    List<String> servers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
    Object port = conf.get(Config.STORM_ZOOKEEPER_PORT);
    // Only apply ZK ACLs when server-side ZK authentication is configured.
    List<ACL> acls = null;
    if (Utils.isZkAuthenticationConfiguredStormServer(conf)) {
        acls = adminZkAcls();
    }
    try {
        stormClusterState = ClusterUtils.mkStormClusterState(conf, acls, new ClusterStateContext(DaemonType.NIMBUS));
    } catch (Exception e) {
        // BUG FIX: the original built "new RuntimeException(e)" without throwing
        // it, so failures were silently ignored and stormClusterState stayed
        // null. Log with the cause and actually propagate the failure.
        LOG.error("admin can't create stormClusterState", e);
        throw new RuntimeException(e);
    }
    // NOTE(review): this Curator client is created but never closed nor stored
    // in a field — likely a resource leak; confirm whether it is still needed.
    CuratorFramework zk = Zookeeper.mkClient(conf, servers, port, "", new DefaultWatcherCallBack(), conf);
}
Usage of org.apache.storm.cluster.ClusterStateContext in the Apache Storm project:
the main method of the Heartbeats class.
/**
 * Command-line entry point for inspecting heartbeat state.
 * Usage: heartbeats [list|get] path
 *
 * @param args args[0] is the command ("list" or "get"), args[1] the state path.
 * @throws IllegalArgumentException if fewer than two arguments are given.
 */
public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        throw new IllegalArgumentException("Command and path arguments must be provided.");
    }
    String command = args[0];
    String path = args[1];
    Map<String, Object> conf = Utils.readStormConfig();
    IStateStorage cluster = ClusterUtils.mkStateStorage(conf, conf, new ClusterStateContext());
    LOG.info("Command: [{}]", command);
    // Run the command inside try/finally so the storage handle is always
    // closed, even if a handler throws (the original skipped close on error).
    try {
        switch(command) {
            case "list":
                handleListCommand(cluster, path);
                break;
            case "get":
                handleGetCommand(cluster, path);
                break;
            default:
                LOG.info("Usage: heartbeats [list|get] path");
        }
    } finally {
        try {
            cluster.close();
        } catch (Exception e) {
            // BUG FIX: the original used a "{}" placeholder with the exception
            // as its argument, which makes SLF4J format e.toString() and drop
            // the stack trace. Passing the Throwable as the last argument with
            // no placeholder logs the full trace.
            LOG.info("Caught exception on close.", e);
        }
    }
    // force process to be terminated
    System.exit(0);
}
Usage of org.apache.storm.cluster.ClusterStateContext in the Apache Storm project:
the start method of the Worker class.
/**
 * Boots this worker process: records its PID (distributed mode only), connects
 * to cluster state, starts the metrics registry, resolves initial credentials,
 * and finally runs the worker body under the authenticated Subject.
 *
 * @throws Exception if any startup step (file I/O, cluster state, auth) fails.
 */
public void start() throws Exception {
    LOG.info("Launching worker for {} on {}:{} with id {} and conf {}", topologyId, assignmentId, port, workerId, ConfigUtils.maskPasswords(conf));
    // if ConfigUtils.isLocalMode(conf) returns false then it is in distributed mode.
    if (!ConfigUtils.isLocalMode(conf)) {
        // Distributed mode
        SysOutOverSLF4J.sendSystemOutAndErrToSLF4J();
        String pid = Utils.processPid();
        // Record this process's pid both in the worker pid path and in the
        // topology's artifacts directory (presumably so the supervisor can
        // locate/terminate the process — TODO confirm against supervisor code).
        FileUtils.touch(new File(ConfigUtils.workerPidPath(conf, workerId, pid)));
        FileUtils.writeStringToFile(new File(ConfigUtils.workerArtifactsPidPath(conf, topologyId, port)), pid, Charset.forName("UTF-8"));
    }
    // One context shared by both the state storage and the cluster state view;
    // it carries DaemonType.WORKER plus the topology-specific configuration.
    ClusterStateContext csContext = new ClusterStateContext(DaemonType.WORKER, topologyConf);
    IStateStorage stateStorage = ClusterUtils.mkStateStorage(conf, topologyConf, csContext);
    IStormClusterState stormClusterState = ClusterUtils.mkStormClusterState(stateStorage, null, csContext);
    metricRegistry.start(topologyConf, port);
    SharedMetricRegistries.add(WORKER_METRICS_REGISTRY, metricRegistry.getRegistry());
    // Seed the credential map from cluster state; credentials may legitimately
    // be absent (null) when none have been pushed for this topology yet.
    Credentials initialCredentials = stormClusterState.credentials(topologyId, null);
    Map<String, String> initCreds = new HashMap<>();
    if (initialCredentials != null) {
        initCreds.putAll(initialCredentials.get_creds());
    }
    autoCreds = ClientAuthUtils.getAutoCredentials(topologyConf);
    subject = ClientAuthUtils.populateSubject(null, autoCreds, initCreds);
    // Run the remainder of worker startup as the populated Subject so that
    // downstream calls execute with the worker's credentials.
    Subject.doAs(subject, (PrivilegedExceptionAction<Object>) () -> loadWorker(stateStorage, stormClusterState, initCreds, initialCredentials));
}
Usage of org.apache.storm.cluster.ClusterStateContext in the Apache Storm project:
the prepare method of the LocalFsBlobStore class.
/**
 * Prepares the local-filesystem blob store: creates the ZK client, resolves
 * the blob base directory, and connects to storm cluster state.
 *
 * @param conf          the storm configuration map.
 * @param overrideBase  base directory for blobs, or null to use the configured
 *                      default from ConfigUtils.absoluteStormBlobStoreDir.
 * @param nimbusInfo    identity of this nimbus instance.
 * @param leaderElector elector used for leadership-sensitive operations.
 * @throws RuntimeException if the file store or cluster state cannot be created.
 */
@Override
public void prepare(Map<String, Object> conf, String overrideBase, NimbusInfo nimbusInfo, ILeaderElector leaderElector) {
    this.conf = conf;
    this.nimbusInfo = nimbusInfo;
    zkClient = BlobStoreUtils.createZKClient(conf, DaemonType.NIMBUS);
    if (overrideBase == null) {
        overrideBase = ConfigUtils.absoluteStormBlobStoreDir(conf);
    }
    File baseDir = new File(overrideBase, BASE_BLOBS_DIR_NAME);
    try {
        fbs = new FileBlobStoreImpl(baseDir, conf);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    aclHandler = new BlobStoreAclHandler(conf);
    try {
        this.stormClusterState = ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
    } catch (Exception e) {
        // BUG FIX: the original swallowed the exception with printStackTrace(),
        // leaving stormClusterState null and deferring the failure to a later
        // NPE. Fail fast with the cause preserved, mirroring the
        // FileBlobStoreImpl handling above.
        throw new RuntimeException("Failed to create storm cluster state", e);
    }
    timer = new Timer("BLOB-STORE-TIMER", true);
    this.leaderElector = leaderElector;
}
End of aggregated ClusterStateContext usage examples.