Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method setupBlobstore.
/**
 * Sets up blobstore state for all current keys.
 * @throws KeyNotFoundException
 * @throws AuthorizationException
 */
private void setupBlobstore() throws AuthorizationException, KeyNotFoundException {
    IStormClusterState state = stormClusterState;
    BlobStore store = blobStore;
    Set<String> localKeys = new HashSet<>();
    for (Iterator<String> it = store.listKeys(); it.hasNext(); ) {
        localKeys.add(it.next());
    }
    Set<String> activeKeys = new HashSet<>(state.activeKeys());
    // Keys present both locally and in ZooKeeper get their state (re)registered below.
    Set<String> activeLocalKeys = new HashSet<>(localKeys);
    activeLocalKeys.retainAll(activeKeys);
    // Keys that exist only locally are stale and are deleted.
    Set<String> keysToDelete = new HashSet<>(localKeys);
    keysToDelete.removeAll(activeKeys);
    NimbusInfo nimbusInfo = nimbusHostPortInfo;
    LOG.debug("Deleting keys not on the zookeeper {}", keysToDelete);
    for (String toDelete : keysToDelete) {
        store.deleteBlob(toDelete, NIMBUS_SUBJECT);
    }
    LOG.debug("Creating list of key entries for blobstore inside zookeeper {} local {}", activeKeys, activeLocalKeys);
    for (String key : activeLocalKeys) {
        try {
            state.setupBlobstore(key, nimbusInfo, getVersionForKey(key, nimbusInfo, conf));
        } catch (KeyNotFoundException e) {
            // invalid key, remove it from blobstore
            store.deleteBlob(key, NIMBUS_SUBJECT);
        }
    }
}
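The reconciliation above is plain set arithmetic. A standalone sketch with made-up key names (not Nimbus code, only java.util) makes the retainAll/removeAll intent explicit:
// Illustrative only; the key names are invented.
Set<String> localKeys = new HashSet<>(Arrays.asList("topo-1-jar", "topo-2-conf", "stale-key"));
Set<String> activeKeys = new HashSet<>(Arrays.asList("topo-1-jar", "topo-2-conf"));
Set<String> activeLocalKeys = new HashSet<>(localKeys);
activeLocalKeys.retainAll(activeKeys); // intersection -> [topo-1-jar, topo-2-conf]: registered in ZooKeeper
Set<String> keysToDelete = new HashSet<>(localKeys);
keysToDelete.removeAll(activeKeys); // difference -> [stale-key]: deleted from the local blobstore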
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method tryReadTopoConfFromName.
private Map<String, Object> tryReadTopoConfFromName(final String topoName) throws NotAliveException, AuthorizationException, IOException {
    IStormClusterState state = stormClusterState;
    String topoId = state.getTopoId(topoName)
        .orElseThrow(() -> new NotAliveException(topoName + " is not alive"));
    return tryReadTopoConf(topoId, blobStore);
}
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method blobSync.
private void blobSync() throws Exception {
    if ("distributed".equals(conf.get(Config.STORM_CLUSTER_MODE))) {
        if (!isLeader()) {
            IStormClusterState state = stormClusterState;
            NimbusInfo nimbusInfo = nimbusHostPortInfo;
            BlobStore store = blobStore;
            Set<String> allKeys = new HashSet<>();
            for (Iterator<String> it = store.listKeys(); it.hasNext(); ) {
                allKeys.add(it.next());
            }
            // blobstore(...) returns the blob keys currently in ZooKeeper and registers
            // the callback to run when that key set changes, re-triggering the sync.
            Set<String> zkKeys = new HashSet<>(state.blobstore(() -> {
                try {
                    this.blobSync();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }));
            LOG.debug("blob-sync blob-store-keys {} zookeeper-keys {}", allKeys, zkKeys);
            BlobSynchronizer sync = new BlobSynchronizer(store, conf);
            sync.setNimbusInfo(nimbusInfo);
            sync.setBlobStoreKeySet(allKeys);
            sync.setZookeeperKeySet(zkKeys);
            sync.syncBlobs();
        }
        //else not leader (NOOP)
    }
    //else local (NOOP)
}
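The one subtle call is state.blobstore(...), which both lists the keys and registers a watch-style callback; that is why the lambda schedules another blobSync(). A minimal sketch of consuming that API, where onZkBlobKeysChanged() is a hypothetical handler standing in for the recursive call:
// Sketch only, not the Nimbus implementation.
List<String> keyList = state.blobstore(() -> onZkBlobKeysChanged()); // onZkBlobKeysChanged() is hypothetical
Set<String> zkKeys = new HashSet<>(keyList);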
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Nimbus, method getTopologyInfoWithOpts.
@Override
public TopologyInfo getTopologyInfoWithOpts(String topoId, GetInfoOptions options) throws NotAliveException, AuthorizationException, TException {
    try {
        getTopologyInfoWithOptsCalls.mark();
        CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyInfo");
        if (common.base == null) {
            throw new NotAliveException(topoId);
        }
        IStormClusterState state = stormClusterState;
        // Default to ALL when the caller did not specify how many errors to return.
        NumErrorsChoice numErrChoice = OR(options.get_num_err_choice(), NumErrorsChoice.ALL);
        Map<String, List<ErrorInfo>> errors = new HashMap<>();
        for (String component : common.allComponents) {
            switch (numErrChoice) {
                case NONE:
                    errors.put(component, Collections.emptyList());
                    break;
                case ONE:
                    List<ErrorInfo> errList = new ArrayList<>();
                    ErrorInfo info = state.lastError(topoId, component);
                    if (info != null) {
                        errList.add(info);
                    }
                    errors.put(component, errList);
                    break;
                case ALL:
                    errors.put(component, state.errors(topoId, component));
                    break;
                default:
                    LOG.warn("Got invalid NumErrorsChoice '{}'", numErrChoice);
                    errors.put(component, state.errors(topoId, component));
                    break;
            }
        }
        List<ExecutorSummary> summaries = new ArrayList<>();
        if (common.assignment != null) {
            // Build one ExecutorSummary per assigned executor, merging in its latest heartbeat.
            for (Entry<List<Long>, NodeInfo> entry : common.assignment.get_executor_node_port().entrySet()) {
                NodeInfo ni = entry.getValue();
                ExecutorInfo execInfo = toExecInfo(entry.getKey());
                String host = ni.get_node();
                Map<String, Object> heartbeat = common.beats.get(StatsUtil.convertExecutor(entry.getKey()));
                if (heartbeat == null) {
                    heartbeat = Collections.emptyMap();
                }
                ExecutorSummary summ = new ExecutorSummary(execInfo, common.taskToComponent.get(execInfo.get_task_start()),
                    host, ni.get_port_iterator().next().intValue(), (Integer) heartbeat.getOrDefault("uptime", 0));
                //heartbeats "stats"
                Map<String, Object> hb = (Map<String, Object>) heartbeat.get("heartbeat");
                if (hb != null) {
                    Map ex = (Map) hb.get("stats");
                    if (ex != null) {
                        ExecutorStats stats = StatsUtil.thriftifyExecutorStats(ex);
                        summ.set_stats(stats);
                    }
                }
                summaries.add(summ);
            }
        }
        TopologyInfo topoInfo = new TopologyInfo(topoId, common.topoName, Time.deltaSecs(common.launchTimeSecs),
            summaries, extractStatusStr(common.base), errors);
        if (common.base.is_set_owner()) {
            topoInfo.set_owner(common.base.get_owner());
        }
        String schedStatus = idToSchedStatus.get().get(topoId);
        if (schedStatus != null) {
            topoInfo.set_sched_status(schedStatus);
        }
        TopologyResources resources = getResourcesForTopology(topoId, common.base);
        if (resources != null) {
            topoInfo.set_requested_memonheap(resources.getRequestedMemOnHeap());
            topoInfo.set_requested_memoffheap(resources.getRequestedMemOffHeap());
            topoInfo.set_requested_cpu(resources.getRequestedCpu());
            topoInfo.set_assigned_memonheap(resources.getAssignedMemOnHeap());
            topoInfo.set_assigned_memoffheap(resources.getAssignedMemOffHeap());
            topoInfo.set_assigned_cpu(resources.getAssignedCpu());
        }
        if (common.base.is_set_component_debug()) {
            topoInfo.set_component_debug(common.base.get_component_debug());
        }
        topoInfo.set_replication_count(getBlobReplicationCount(ConfigUtils.masterStormCodeKey(topoId)));
        return topoInfo;
    } catch (Exception e) {
        LOG.warn("Get topo info exception. (topology id='{}')", topoId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
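For context, a client-side sketch of exercising this RPC with NumErrorsChoice.ONE; the topology id is invented, and the sketch assumes a configured NimbusClient inside a method that throws Exception:
// Illustrative caller, not Nimbus-side code.
try (NimbusClient nimbus = NimbusClient.getConfiguredClient(conf)) {
    GetInfoOptions opts = new GetInfoOptions();
    opts.set_num_err_choice(NumErrorsChoice.ONE); // only the most recent error per component
    TopologyInfo info = nimbus.getClient().getTopologyInfoWithOpts("word-count-1-1700000000", opts);
    info.get_errors().forEach((component, errs) -> System.out.println(component + " -> " + errs));
}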
Use of org.apache.storm.cluster.IStormClusterState in project storm by apache: class Testing, method completeTopology.
/**
 * Run a topology to completion, capturing all of the messages that are emitted. This only works when all of the spouts are
 * instances of {@link org.apache.storm.testing.CompletableSpout} or are replaced by MockedSources in param.
 * @param cluster the cluster to submit the topology to
 * @param topology the topology itself
 * @param param parameters to describe how to complete a topology
 * @return a map of the component to the list of tuples it emitted
 * @throws TException on any error from nimbus.
 * @throws InterruptedException if interrupted while waiting
 */
public static Map<String, List<FixedTuple>> completeTopology(ILocalCluster cluster, StormTopology topology,
        CompleteTopologyParam param) throws TException, InterruptedException {
    Map<String, List<FixedTuple>> ret = null;
    CapturedTopology<StormTopology> capTopo = captureTopology(topology);
    topology = capTopo.topology;
    String topoName = param.getTopologyName();
    if (topoName == null) {
        topoName = "topologytest-" + Utils.uuid();
    }
    Map<String, SpoutSpec> spouts = topology.get_spouts();
    MockedSources ms = param.getMockedSources();
    if (ms != null) {
        // Swap each mocked spout for a FixedTupleSpout that replays the supplied tuples.
        for (Entry<String, List<FixedTuple>> mocked : ms.getData().entrySet()) {
            FixedTupleSpout newSpout = new FixedTupleSpout(mocked.getValue());
            spouts.get(mocked.getKey()).set_spout_object(Thrift.serializeComponentObject(newSpout));
        }
    }
    List<Object> spoutObjects = spouts.values().stream()
        .map((spec) -> Thrift.deserializeComponentObject(spec.get_spout_object()))
        .collect(Collectors.toList());
    for (Object o : spoutObjects) {
        if (!(o instanceof CompletableSpout)) {
            throw new RuntimeException("Cannot complete topology unless every spout is a CompletableSpout (or mocked to be); failed by " + o);
        }
    }
    for (Object spout : spoutObjects) {
        ((CompletableSpout) spout).startup();
    }
    cluster.submitTopology(topoName, param.getStormConf(), topology);
    if (Time.isSimulating()) {
        cluster.advanceClusterTime(11);
    }
    IStormClusterState state = cluster.getClusterState();
    String topoId = state.getTopoId(topoName).get();
    // Give the topology time to come up without using it to wait for the spouts to complete
    simulateWait(cluster);
    Integer timeoutMs = param.getTimeoutMs();
    if (timeoutMs == null) {
        timeoutMs = TEST_TIMEOUT_MS;
    }
    // Wait (up to the timeout) for every spout to report that it has emitted everything.
    whileTimeout(timeoutMs, () -> !isEvery(spoutObjects, (o) -> ((CompletableSpout) o).isExhausted()), () -> {
        try {
            simulateWait(cluster);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    KillOptions killOpts = new KillOptions();
    killOpts.set_wait_secs(0);
    cluster.killTopologyWithOpts(topoName, killOpts);
    // Wait for the assignment to disappear, i.e. for the topology to actually shut down.
    whileTimeout(timeoutMs, () -> state.assignmentInfo(topoId, null) != null, () -> {
        try {
            simulateWait(cluster);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    if (param.getCleanupState()) {
        for (Object o : spoutObjects) {
            ((CompletableSpout) o).clean();
        }
        ret = capTopo.capturer.getAndRemoveResults();
    } else {
        ret = capTopo.capturer.getAndClearResults();
    }
    return ret;
}
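A minimal usage sketch, assuming the stock test components from org.apache.storm.testing; the component names and mock data are illustrative, not from the source:
// Sketch of a caller; assumes an enclosing test method that throws Exception.
try (LocalCluster cluster = new LocalCluster()) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestWordSpout());
    builder.setBolt("counter", new TestWordCounter()).shuffleGrouping("spout");
    // Replace the spout's real output with fixed tuples so the topology is completable.
    MockedSources mocked = new MockedSources();
    mocked.addMockData("spout", new Values("nathan"), new Values("bob"));
    CompleteTopologyParam param = new CompleteTopologyParam();
    param.setMockedSources(mocked);
    Map<String, List<FixedTuple>> results = Testing.completeTopology(cluster, builder.createTopology(), param);
    // readTuples flattens the captured FixedTuples for one component.
    List<List<Object>> emitted = Testing.readTuples(results, "spout");
}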