Use of org.apache.storm.generated.GlobalStreamId in project storm by apache.
From the class StatefulWindowedBoltExecutorTest, the method testRecovery:
@Test
public void testRecovery() throws Exception {
mockStormConf.put(Config.TOPOLOGY_BOLTS_MESSAGE_ID_FIELD_NAME, "msgid");
mockStormConf.put(Config.TOPOLOGY_BOLTS_WINDOW_LENGTH_COUNT, 5);
mockStormConf.put(Config.TOPOLOGY_BOLTS_SLIDING_INTERVAL_COUNT, 5);
KeyValueState<TaskStream, WindowState> mockState;
mockState = Mockito.mock(KeyValueState.class);
Map<GlobalStreamId, Grouping> mockMap = Mockito.mock(Map.class);
Mockito.when(mockTopologyContext.getThisSources()).thenReturn(mockMap);
Mockito.when(mockTopologyContext.getComponentTasks(Mockito.anyString())).thenReturn(Collections.singletonList(1));
Mockito.when(mockMap.keySet()).thenReturn(Collections.singleton(new GlobalStreamId("a", "s")));
WindowState mockWindowState = new WindowState(4, 4);
Mockito.when(mockState.get(Mockito.any(TaskStream.class))).thenReturn(mockWindowState);
executor.prepare(mockStormConf, mockTopologyContext, mockOutputCollector, mockState);
executor.initState(null);
List<Tuple> tuples = getMockTuples(10);
for (Tuple tuple : tuples) {
executor.execute(tuple);
}
WindowState expectedState = new WindowState(4, 9);
Mockito.verify(mockState, Mockito.times(1)).put(Mockito.any(TaskStream.class), Mockito.eq(expectedState));
}
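The getMockTuples helper used above is not reproduced on this page. A minimal sketch of what it might look like, assuming Mockito-mocked tuples whose source component, stream, and task line up with the values mocked in the test and whose "msgid" field carries an increasing sequence number (an assumption, not the project's actual helper):

private List<Tuple> getMockTuples(int count) {
    List<Tuple> tuples = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        Tuple tuple = Mockito.mock(Tuple.class);
        // assumed: matches the GlobalStreamId("a", "s") and the single task 1 mocked above
        Mockito.when(tuple.getSourceComponent()).thenReturn("a");
        Mockito.when(tuple.getSourceStreamId()).thenReturn("s");
        Mockito.when(tuple.getSourceTask()).thenReturn(1);
        // assumed: the executor reads the message id from the configured "msgid" field
        Mockito.when(tuple.getLongByField("msgid")).thenReturn((long) i);
        tuples.add(tuple);
    }
    return tuples;
}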
Use of org.apache.storm.generated.GlobalStreamId in project storm by apache.
From the class TopologyBuilderTest, the method testStatefulTopology:
// TODO enable if setStateSpout gets implemented
// @Test(expected = IllegalArgumentException.class)
// public void testSetStateSpout() {
// builder.setStateSpout("stateSpout", mock(IRichStateSpout.class), 0);
// }
@Test
public void testStatefulTopology() {
builder.setSpout("spout1", makeDummySpout());
builder.setSpout("spout2", makeDummySpout());
builder.setBolt("bolt1", makeDummyStatefulBolt(), 1).shuffleGrouping("spout1").shuffleGrouping("spout2");
builder.setBolt("bolt2", makeDummyStatefulBolt(), 1).shuffleGrouping("spout1");
builder.setBolt("bolt3", makeDummyStatefulBolt(), 1).shuffleGrouping("bolt1").shuffleGrouping("bolt2");
StormTopology topology = builder.createTopology();
Assert.assertNotNull(topology);
Set<String> spouts = topology.get_spouts().keySet();
// the checkpoint spout should have been added
Assert.assertEquals(ImmutableSet.of("spout1", "spout2", "$checkpointspout"), spouts);
// bolt1, bolt2 should also receive from checkpoint spout
Assert.assertEquals(ImmutableSet.of(new GlobalStreamId("spout1", "default"), new GlobalStreamId("spout2", "default"), new GlobalStreamId("$checkpointspout", "$checkpoint")), topology.get_bolts().get("bolt1").get_common().get_inputs().keySet());
Assert.assertEquals(ImmutableSet.of(new GlobalStreamId("spout1", "default"), new GlobalStreamId("$checkpointspout", "$checkpoint")), topology.get_bolts().get("bolt2").get_common().get_inputs().keySet());
// bolt3 should also receive from checkpoint streams of bolt1, bolt2
Assert.assertEquals(ImmutableSet.of(new GlobalStreamId("bolt1", "default"), new GlobalStreamId("bolt1", "$checkpoint"), new GlobalStreamId("bolt2", "default"), new GlobalStreamId("bolt2", "$checkpoint")), topology.get_bolts().get("bolt3").get_common().get_inputs().keySet());
}
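The makeDummySpout and makeDummyStatefulBolt helpers are not shown here. A plausible sketch, assuming serializable Mockito mocks are enough for createTopology(), which java-serializes each component; the real test helpers may differ:

private IRichSpout makeDummySpout() {
    // serializable mock: createTopology() serializes every component
    return Mockito.mock(IRichSpout.class, Mockito.withSettings().serializable());
}

private IStatefulBolt<?> makeDummyStatefulBolt() {
    return Mockito.mock(IStatefulBolt.class, Mockito.withSettings().serializable());
}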
Use of org.apache.storm.generated.GlobalStreamId in project storm by apache.
From the class StreamBuilderTest, the method testGlobalAggregate:
@Test
public void testGlobalAggregate() throws Exception {
Stream<String> stream = streamBuilder.newStream(newSpout(Utils.DEFAULT_STREAM_ID), new ValueMapper<>(0), 2);
stream.aggregate(new Count<>());
StormTopology topology = streamBuilder.build();
assertEquals(2, topology.get_bolts_size());
Bolt bolt1 = topology.get_bolts().get("bolt1");
Bolt bolt2 = topology.get_bolts().get("bolt2");
String spoutId = topology.get_spouts().keySet().iterator().next();
Map<GlobalStreamId, Grouping> expected1 = new HashMap<>();
expected1.put(new GlobalStreamId(spoutId, "default"), Grouping.shuffle(new NullStruct()));
Map<GlobalStreamId, Grouping> expected2 = new HashMap<>();
expected2.put(new GlobalStreamId("bolt1", "s1"), Grouping.fields(Collections.emptyList()));
expected2.put(new GlobalStreamId("bolt1", "s1__punctuation"), Grouping.all(new NullStruct()));
assertEquals(expected1, bolt1.get_common().get_inputs());
assertEquals(expected2, bolt2.get_common().get_inputs());
}
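The newSpout helper presumably declares the requested stream id with a single value field; a minimal sketch under that assumption, using the Storm 2.x BaseRichSpout signatures (not the project's actual helper):

private static IRichSpout newSpout(String streamId) {
    return new BaseRichSpout() {
        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // a single-value stream feeds the ValueMapper<>(0) used above
            declarer.declareStream(streamId, new Fields("value"));
        }

        @Override
        public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
        }

        @Override
        public void nextTuple() {
        }
    };
}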
Use of org.apache.storm.generated.GlobalStreamId in project storm by apache.
From the class StreamBuilderTest, the method testSpoutToBolt:
@Test
public void testSpoutToBolt() throws Exception {
Stream<Tuple> stream = streamBuilder.newStream(newSpout(Utils.DEFAULT_STREAM_ID));
stream.to(newBolt());
StormTopology topology = streamBuilder.build();
assertEquals(1, topology.get_spouts_size());
assertEquals(1, topology.get_bolts_size());
String spoutId = topology.get_spouts().keySet().iterator().next();
Map<GlobalStreamId, Grouping> expected = new HashMap<>();
expected.put(new GlobalStreamId(spoutId, "default"), Grouping.shuffle(new NullStruct()));
assertEquals(expected, topology.get_bolts().values().iterator().next().get_common().get_inputs());
}
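The newBolt helper is likewise not shown; a minimal sketch, again assuming a serializable Mockito mock is acceptable to the builder:

private static IRichBolt newBolt() {
    // serializable mock: building the topology java-serializes each component
    return Mockito.mock(IRichBolt.class, Mockito.withSettings().serializable());
}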
Use of org.apache.storm.generated.GlobalStreamId in project storm by apache.
From the class TridentTopologyBuilder, the method buildTopology:
public StormTopology buildTopology(Map<String, Number> masterCoordResources) {
TopologyBuilder builder = new TopologyBuilder();
Map<GlobalStreamId, String> batchIdsForSpouts = fleshOutStreamBatchIds(false);
Map<GlobalStreamId, String> batchIdsForBolts = fleshOutStreamBatchIds(true);
Map<String, List<String>> batchesToCommitIds = new HashMap<>();
Map<String, List<ITridentSpout>> batchesToSpouts = new HashMap<>();
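// Transactional spouts: a plain IRichSpout is added directly; anything else gets a spout coordinator bolt
// plus a TridentBoltExecutor wrapping a TridentSpoutExecutor, tracked per batch group.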
for (String id : _spouts.keySet()) {
TransactionalSpoutComponent c = _spouts.get(id);
if (c.spout instanceof IRichSpout) {
//TODO: wrap this to set the stream name
builder.setSpout(id, (IRichSpout) c.spout, c.parallelism);
} else {
String batchGroup = c.batchGroupId;
if (!batchesToCommitIds.containsKey(batchGroup)) {
batchesToCommitIds.put(batchGroup, new ArrayList<String>());
}
batchesToCommitIds.get(batchGroup).add(c.commitStateId);
if (!batchesToSpouts.containsKey(batchGroup)) {
batchesToSpouts.put(batchGroup, new ArrayList<ITridentSpout>());
}
batchesToSpouts.get(batchGroup).add((ITridentSpout) c.spout);
BoltDeclarer scd = builder.setBolt(spoutCoordinator(id), new TridentSpoutCoordinator(c.commitStateId, (ITridentSpout) c.spout))
        .globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.BATCH_STREAM_ID)
        .globalGrouping(masterCoordinator(c.batchGroupId), MasterBatchCoordinator.SUCCESS_STREAM_ID);
for (Map<String, Object> m : c.componentConfs) {
scd.addConfigurations(m);
}
Map<String, TridentBoltExecutor.CoordSpec> specs = new HashMap<>();
specs.put(c.batchGroupId, new CoordSpec());
BoltDeclarer bd = builder.setBolt(id, new TridentBoltExecutor(new TridentSpoutExecutor(c.commitStateId, c.streamName, ((ITridentSpout) c.spout)), batchIdsForSpouts, specs), c.parallelism);
bd.allGrouping(spoutCoordinator(id), MasterBatchCoordinator.BATCH_STREAM_ID);
bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.SUCCESS_STREAM_ID);
if (c.spout instanceof ICommitterTridentSpout) {
bd.allGrouping(masterCoordinator(batchGroup), MasterBatchCoordinator.COMMIT_STREAM_ID);
}
for (Map<String, Object> m : c.componentConfs) {
bd.addConfigurations(m);
}
}
}
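// Batch-per-tuple spouts are wrapped in a RichSpoutBatchTriggerer and added as regular spouts.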
for (String id : _batchPerTupleSpouts.keySet()) {
SpoutComponent c = _batchPerTupleSpouts.get(id);
SpoutDeclarer d = builder.setSpout(id, new RichSpoutBatchTriggerer((IRichSpout) c.spout, c.streamName, c.batchGroupId), c.parallelism);
for (Map<String, Object> conf : c.componentConfs) {
d.addConfigurations(conf);
}
}
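// Optional resource settings applied to the master batch coordinator spouts created below.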
Number onHeap = masterCoordResources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB);
Number offHeap = masterCoordResources.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB);
Number cpuLoad = masterCoordResources.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT);
for (String batch : batchesToCommitIds.keySet()) {
List<String> commitIds = batchesToCommitIds.get(batch);
SpoutDeclarer masterCoord = builder.setSpout(masterCoordinator(batch), new MasterBatchCoordinator(commitIds, batchesToSpouts.get(batch)));
if (onHeap != null) {
if (offHeap != null) {
masterCoord.setMemoryLoad(onHeap, offHeap);
} else {
masterCoord.setMemoryLoad(onHeap);
}
}
if (cpuLoad != null) {
masterCoord.setCPULoad(cpuLoad);
}
}
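// Each Trident bolt is wrapped in a TridentBoltExecutor with a CoordSpec per batch group it subscribes to,
// then wired to its inputs, coordination streams, and commit streams.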
for (String id : _bolts.keySet()) {
Component c = _bolts.get(id);
Map<String, CoordSpec> specs = new HashMap<>();
for (GlobalStreamId s : getBoltSubscriptionStreams(id)) {
String batch = batchIdsForBolts.get(s);
if (!specs.containsKey(batch))
specs.put(batch, new CoordSpec());
CoordSpec spec = specs.get(batch);
CoordType ct;
if (_batchPerTupleSpouts.containsKey(s.get_componentId())) {
ct = CoordType.single();
} else {
ct = CoordType.all();
}
spec.coords.put(s.get_componentId(), ct);
}
for (String b : c.committerBatches) {
specs.get(b).commitStream = new GlobalStreamId(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
}
BoltDeclarer d = builder.setBolt(id, new TridentBoltExecutor(c.bolt, batchIdsForBolts, specs), c.parallelism);
for (Map<String, Object> conf : c.componentConfs) {
d.addConfigurations(conf);
}
for (InputDeclaration inputDecl : c.declarations) {
inputDecl.declare(d);
}
Map<String, Set<String>> batchToComponents = getBoltBatchToComponentSubscriptions(id);
for (Map.Entry<String, Set<String>> entry : batchToComponents.entrySet()) {
for (String comp : entry.getValue()) {
d.directGrouping(comp, TridentBoltExecutor.COORD_STREAM(entry.getKey()));
}
}
for (String b : c.committerBatches) {
d.allGrouping(masterCoordinator(b), MasterBatchCoordinator.COMMIT_STREAM_ID);
}
}
return builder.createTopology();
}
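A hedged usage sketch: the resource map passed to buildTopology uses the same config keys read above; the builder instance and the numeric values here are illustrative, not taken from the project:

Map<String, Number> masterCoordResources = new HashMap<>();
masterCoordResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 128.0);
masterCoordResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 0.0);
masterCoordResources.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 10.0);
// tridentTopologyBuilder is a hypothetical, already-populated TridentTopologyBuilder instance
StormTopology topology = tridentTopologyBuilder.buildTopology(masterCoordResources);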