Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class CaptureLoad, method captureTopology:
static TopologyLoadConf captureTopology(Nimbus.Iface client, TopologySummary topologySummary) throws Exception {
    String topologyName = topologySummary.get_name();
    LOG.info("Capturing {}...", topologyName);
    String topologyId = topologySummary.get_id();
    TopologyInfo info = client.getTopologyInfo(topologyId);
    TopologyPageInfo tpinfo = client.getTopologyPageInfo(topologyId, ":all-time", false);
    @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
    StormTopology topo = client.getUserTopology(topologyId);
    // Done capturing topology information...
    Map<String, Object> savedTopoConf = new HashMap<>();
    Map<String, Object> topoConf = (Map<String, Object>) JSONValue.parse(client.getTopologyConf(topologyId));
    for (String key : TopologyLoadConf.IMPORTANT_CONF_KEYS) {
        Object o = topoConf.get(key);
        if (o != null) {
            savedTopoConf.put(key, o);
            LOG.info("with config {}: {}", key, o);
        }
    }
    // Let's use the number of actually scheduled workers as a way to bridge RAS and non-RAS
    int numWorkers = tpinfo.get_num_workers();
    if (savedTopoConf.containsKey(Config.TOPOLOGY_WORKERS)) {
        numWorkers = Math.max(numWorkers, ((Number) savedTopoConf.get(Config.TOPOLOGY_WORKERS)).intValue());
    }
    savedTopoConf.put(Config.TOPOLOGY_WORKERS, numWorkers);
    Map<String, LoadCompConf.Builder> boltBuilders = new HashMap<>();
    Map<String, LoadCompConf.Builder> spoutBuilders = new HashMap<>();
    List<InputStream.Builder> inputStreams = new ArrayList<>();
    Map<GlobalStreamId, OutputStream.Builder> outStreams = new HashMap<>();
    // Bolts
    if (topo.get_bolts() != null) {
        for (Map.Entry<String, Bolt> boltSpec : topo.get_bolts().entrySet()) {
            String boltComp = boltSpec.getKey();
            LOG.info("Found bolt {}...", boltComp);
            Bolt bolt = boltSpec.getValue();
            ComponentCommon common = bolt.get_common();
            Map<GlobalStreamId, Grouping> inputs = common.get_inputs();
            if (inputs != null) {
                for (Map.Entry<GlobalStreamId, Grouping> input : inputs.entrySet()) {
                    GlobalStreamId id = input.getKey();
                    LOG.info("with input {}...", id);
                    Grouping grouping = input.getValue();
                    InputStream.Builder builder = new InputStream.Builder()
                        .withId(id.get_streamId())
                        .withFromComponent(id.get_componentId())
                        .withToComponent(boltComp)
                        .withGroupingType(grouping);
                    inputStreams.add(builder);
                }
            }
            Map<String, StreamInfo> outputs = common.get_streams();
            if (outputs != null) {
                for (String name : outputs.keySet()) {
                    GlobalStreamId id = new GlobalStreamId(boltComp, name);
                    LOG.info("and output {}...", id);
                    OutputStream.Builder builder = new OutputStream.Builder().withId(name);
                    outStreams.put(id, builder);
                }
            }
            LoadCompConf.Builder builder = new LoadCompConf.Builder()
                .withParallelism(common.get_parallelism_hint())
                .withId(boltComp);
            boltBuilders.put(boltComp, builder);
        }
        Map<String, Map<String, Double>> boltResources = getBoltsResources(topo, topoConf);
        for (Map.Entry<String, Map<String, Double>> entry : boltResources.entrySet()) {
            LoadCompConf.Builder bd = boltBuilders.get(entry.getKey());
            if (bd != null) {
                Map<String, Double> resources = entry.getValue();
                Double cpu = resources.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
                if (cpu != null) {
                    bd.withCpuLoad(cpu);
                }
                Double mem = resources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
                if (mem != null) {
                    bd.withMemoryLoad(mem);
                }
            }
        }
    }
    // Spouts
    if (topo.get_spouts() != null) {
        for (Map.Entry<String, SpoutSpec> spoutSpec : topo.get_spouts().entrySet()) {
            String spoutComp = spoutSpec.getKey();
            LOG.info("Found Spout {}...", spoutComp);
            SpoutSpec spout = spoutSpec.getValue();
            ComponentCommon common = spout.get_common();
            Map<String, StreamInfo> outputs = common.get_streams();
            if (outputs != null) {
                for (String name : outputs.keySet()) {
                    GlobalStreamId id = new GlobalStreamId(spoutComp, name);
                    LOG.info("with output {}...", id);
                    OutputStream.Builder builder = new OutputStream.Builder().withId(name);
                    outStreams.put(id, builder);
                }
            }
            LoadCompConf.Builder builder = new LoadCompConf.Builder()
                .withParallelism(common.get_parallelism_hint())
                .withId(spoutComp);
            spoutBuilders.put(spoutComp, builder);
        }
        Map<String, Map<String, Double>> spoutResources = getSpoutsResources(topo, topoConf);
        for (Map.Entry<String, Map<String, Double>> entry : spoutResources.entrySet()) {
            LoadCompConf.Builder sd = spoutBuilders.get(entry.getKey());
            if (sd != null) {
                Map<String, Double> resources = entry.getValue();
                Double cpu = resources.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
                if (cpu != null) {
                    sd.withCpuLoad(cpu);
                }
                Double mem = resources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
                if (mem != null) {
                    sd.withMemoryLoad(mem);
                }
            }
        }
    }
    // Stats...
    Map<String, List<ExecutorSummary>> byComponent = new HashMap<>();
    for (ExecutorSummary executor : info.get_executors()) {
        String component = executor.get_component_id();
        List<ExecutorSummary> list = byComponent.get(component);
        if (list == null) {
            list = new ArrayList<>();
            byComponent.put(component, list);
        }
        list.add(executor);
    }
    List<InputStream> streams = new ArrayList<>(inputStreams.size());
    // Compute the stats for the different input streams
    for (InputStream.Builder builder : inputStreams) {
        GlobalStreamId streamId = new GlobalStreamId(builder.getFromComponent(), builder.getId());
        List<ExecutorSummary> summaries = byComponent.get(builder.getToComponent());
        // Execute and process latency...
        builder.withProcessTime(new NormalDistStats(extractBoltValues(summaries, streamId, BoltStats::get_process_ms_avg)));
        builder.withExecTime(new NormalDistStats(extractBoltValues(summaries, streamId, BoltStats::get_execute_ms_avg)));
        // InputStream is done
        streams.add(builder.build());
    }
    // There is a bug in some versions that returns 0 for the uptime.
    // To work around it, we get the uptime in an alternative (working) way.
    Map<String, Integer> workerToUptime = new HashMap<>();
    for (WorkerSummary ws : tpinfo.get_workers()) {
        workerToUptime.put(ws.get_supervisor_id() + ":" + ws.get_port(), ws.get_uptime_secs());
    }
    LOG.debug("WORKER TO UPTIME {}", workerToUptime);
    for (Map.Entry<GlobalStreamId, OutputStream.Builder> entry : outStreams.entrySet()) {
        OutputStream.Builder builder = entry.getValue();
        GlobalStreamId id = entry.getKey();
        List<Double> emittedRate = new ArrayList<>();
        List<ExecutorSummary> summaries = byComponent.get(id.get_componentId());
        if (summaries != null) {
            for (ExecutorSummary summary : summaries) {
                if (summary.is_set_stats()) {
                    int uptime = summary.get_uptime_secs();
                    LOG.debug("UPTIME {}", uptime);
                    if (uptime <= 0) {
                        // Likely it is because of a bug, so try to get it another way
                        String key = summary.get_host() + ":" + summary.get_port();
                        uptime = workerToUptime.getOrDefault(key, 1);
                        LOG.debug("Getting uptime for worker {}, {}", key, uptime);
                    }
                    for (Map.Entry<String, Map<String, Long>> statEntry : summary.get_stats().get_emitted().entrySet()) {
                        String timeWindow = statEntry.getKey();
                        long timeSecs = uptime;
                        try {
                            timeSecs = Long.valueOf(timeWindow);
                        } catch (NumberFormatException e) {
                            // Ignored...
                        }
                        timeSecs = Math.min(timeSecs, uptime);
                        Long count = statEntry.getValue().get(id.get_streamId());
                        if (count != null) {
                            LOG.debug("{} emitted {} for {} secs or {} tuples/sec", id, count, timeSecs, count.doubleValue() / timeSecs);
                            emittedRate.add(count.doubleValue() / timeSecs);
                        }
                    }
                }
            }
        }
        builder.withRate(new NormalDistStats(emittedRate));
        // The OutputStream is done
        LoadCompConf.Builder comp = boltBuilders.get(id.get_componentId());
        if (comp == null) {
            comp = spoutBuilders.get(id.get_componentId());
        }
        comp.withStream(builder.build());
    }
    List<LoadCompConf> spouts = spoutBuilders.values().stream()
        .map((b) -> b.build())
        .collect(Collectors.toList());
    List<LoadCompConf> bolts = boltBuilders.values().stream()
        .map((b) -> b.build())
        .collect(Collectors.toList());
    return new TopologyLoadConf(topologyName, savedTopoConf, spouts, bolts, streams);
}
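
A minimal sketch of how captureTopology might be driven end to end: list the running topologies from Nimbus, capture each one, and persist the result. The output file name and the writeTo call are assumptions for illustration, not necessarily the exact layout CaptureLoad's main method uses.

Map<String, Object> conf = Utils.readStormConfig();
try (NimbusClient nimbusClient = NimbusClient.getConfiguredClient(conf)) {
    Nimbus.Iface client = nimbusClient.getClient();
    for (TopologySummary ts : client.getClusterInfo().get_topologies()) {
        TopologyLoadConf loadConf = captureTopology(client, ts);
        // Persist the captured load profile; the file naming here is an assumption
        loadConf.writeTo(new File(ts.get_name() + ".yaml"));
    }
}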
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class StormSqlLocalClusterImpl, method runLocal:
public void runLocal(LocalCluster localCluster, Iterable<String> statements, Predicate<Void> waitCondition, long waitTimeoutMs) throws Exception {
    final Config conf = new Config();
    conf.setMaxSpoutPending(20);
    for (String sql : statements) {
        StormParser parser = new StormParser(sql);
        SqlNode node = parser.impl().parseSqlStmtEof();
        if (node instanceof SqlCreateTable) {
            // DDL: register the table with the local SQL context
            sqlContext.interpretCreateTable((SqlCreateTable) node);
        } else if (node instanceof SqlCreateFunction) {
            // DDL: register the function with the local SQL context
            sqlContext.interpretCreateFunction((SqlCreateFunction) node);
        } else {
            // DML: compile the query into a topology and run it on the local cluster
            AbstractStreamsProcessor processor = sqlContext.compileSql(sql);
            StormTopology topo = processor.build();
            if (processor.getClassLoaders() != null && processor.getClassLoaders().size() > 0) {
                CompilingClassLoader lastClassloader = processor.getClassLoaders().get(processor.getClassLoaders().size() - 1);
                Utils.setClassLoaderForJavaDeSerialize(lastClassloader);
            }
            try (LocalCluster.LocalTopology stormTopo = localCluster.submitTopology("storm-sql", conf, topo)) {
                waitForCompletion(waitTimeoutMs, waitCondition);
            } finally {
                while (localCluster.getTopologySummaries().size() > 0) {
                    Thread.sleep(10);
                }
                Utils.resetClassLoaderForJavaDeSerialize();
            }
        }
    }
}
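
A hedged usage sketch: feed runLocal a couple of statements against a LocalCluster and wait for output. The stormSqlLocalCluster instance, the table definition, the mock location URI, and the valuesCollected() predicate helper are hypothetical placeholders.

try (LocalCluster cluster = new LocalCluster()) {
    List<String> statements = Arrays.asList(
        "CREATE EXTERNAL TABLE ORDERS (ID INT PRIMARY KEY) LOCATION 'mock:///orders'",
        "INSERT INTO ORDERS SELECT ID FROM ORDERS WHERE ID > 2");
    // Wait up to 60s for the hypothetical sink to observe at least one record
    stormSqlLocalCluster.runLocal(cluster, statements, v -> valuesCollected() > 0, 60_000);
}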
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class StreamBuilderTest, method testMultiPartitionByKeyWithRepartition:
@Test
public void testMultiPartitionByKeyWithRepartition() {
    TopologyContext mockContext = Mockito.mock(TopologyContext.class);
    OutputCollector mockCollector = Mockito.mock(OutputCollector.class);
    Map<GlobalStreamId, Grouping> expected = new HashMap<>();
    expected.put(new GlobalStreamId("bolt2", "s3"), Grouping.fields(Collections.singletonList("key")));
    expected.put(new GlobalStreamId("bolt2", "s3__punctuation"), Grouping.all(new NullStruct()));
    Stream<Integer> stream = streamBuilder.newStream(newSpout(Utils.DEFAULT_STREAM_ID), new ValueMapper<>(0));
    stream.mapToPair(x -> Pair.of(x, x))
          .window(TumblingWindows.of(BaseWindowedBolt.Count.of(10)))
          .reduceByKey((x, y) -> x + y)
          .repartition(10)
          .reduceByKey((x, y) -> 0)
          .print();
    StormTopology topology = streamBuilder.build();
    assertEquals(3, topology.get_bolts_size());
    assertEquals(expected, topology.get_bolts().get("bolt3").get_common().get_inputs());
}
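
The assertions show what repartition does to the wiring: the bolt after the repartition boundary receives the keyed stream with a fields grouping on "key", plus the window punctuation stream with an all grouping. Declared by hand with TopologyBuilder, that wiring would look roughly like the fragment below (the bolt class is hypothetical and the surrounding spout/bolt declarations are omitted):

TopologyBuilder tb = new TopologyBuilder();
// "bolt2" stands in for the windowed bolt before the repartition boundary
tb.setBolt("bolt3", new WindowedReduceBolt(), 10)   // hypothetical bolt class
  .fieldsGrouping("bolt2", "s3", new Fields("key"))
  .allGrouping("bolt2", "s3__punctuation");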
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class StreamBuilderTest, method testBranch:
@Test
public void testBranch() throws Exception {
    Stream<Tuple> stream = streamBuilder.newStream(newSpout(Utils.DEFAULT_STREAM_ID));
    Stream<Tuple>[] streams = stream.branch(x -> true);
    StormTopology topology = streamBuilder.build();
    assertEquals(1, topology.get_spouts_size());
    assertEquals(1, topology.get_bolts_size());
    Map<GlobalStreamId, Grouping> expected = new HashMap<>();
    String spoutId = topology.get_spouts().keySet().iterator().next();
    expected.put(new GlobalStreamId(spoutId, "default"), Grouping.shuffle(new NullStruct()));
    assertEquals(expected, topology.get_bolts().values().iterator().next().get_common().get_inputs());
    assertEquals(1, streams.length);
    assertEquals(1, streams[0].node.getOutputStreams().size());
    String parentStream = streams[0].node.getOutputStreams().iterator().next() + "-branch";
    assertEquals(1, streams[0].node.getParents(parentStream).size());
    Node processorNode = streams[0].node.getParents(parentStream).iterator().next();
    assertTrue(processorNode instanceof ProcessorNode);
    assertTrue(((ProcessorNode) processorNode).getProcessor() instanceof BranchProcessor);
    assertTrue(processorNode.getParents("default").iterator().next() instanceof SpoutNode);
}
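
In typical use, branch takes several predicates and returns one output stream per predicate, with each value routed to the stream whose predicate it satisfies. A small sketch, assuming RandomIntegerSpout from storm-starter as the source:

StreamBuilder builder = new StreamBuilder();
Stream<Integer>[] evenAndOdd = builder
    .newStream(new RandomIntegerSpout(), new ValueMapper<Integer>(0))
    .branch(x -> x % 2 == 0, x -> x % 2 != 0);
evenAndOdd[0].print();   // even values
evenAndOdd[1].print();   // odd values
StormTopology topology = builder.build();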
Use of org.apache.storm.generated.StormTopology in project storm by apache.
The class StreamBuilderTest, method testBranchAndJoin:
@Test
public void testBranchAndJoin() throws Exception {
    TopologyContext mockContext = Mockito.mock(TopologyContext.class);
    OutputCollector mockCollector = Mockito.mock(OutputCollector.class);
    Stream<Integer> stream = streamBuilder.newStream(newSpout(Utils.DEFAULT_STREAM_ID), new ValueMapper<>(0), 2);
    Stream<Integer>[] streams = stream.branch(x -> x % 2 == 0, x -> x % 2 == 1);
    PairStream<Integer, Pair<Integer, Integer>> joined = streams[0].mapToPair(x -> Pair.of(x, 1))
        .join(streams[1].mapToPair(x -> Pair.of(x, 1)));
    assertTrue(joined.getNode() instanceof ProcessorNode);
    StormTopology topology = streamBuilder.build();
    assertEquals(2, topology.get_bolts_size());
}
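
The joined stream carries Pair<Integer, Pair<Integer, Integer>> values: the key plus the matched left and right values. A hedged continuation showing how it might be consumed before building the topology:

joined.map(p -> p.getFirst() + " -> " + p.getSecond())
      .print();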