Use of org.apache.storm.topology.IRichBolt in project flink by apache.
In class BoltWrapperTest, method testClose.
@SuppressWarnings("unchecked")
@Test
public void testClose() throws Exception {
final IRichBolt bolt = mock(IRichBolt.class);
final SetupOutputFieldsDeclarer declarer = new SetupOutputFieldsDeclarer();
declarer.declare(new Fields("dummy"));
PowerMockito.whenNew(SetupOutputFieldsDeclarer.class).withNoArguments().thenReturn(declarer);
final BoltWrapper<Object, Object> wrapper = new BoltWrapper<Object, Object>(bolt);
wrapper.setup(createMockStreamTask(), new StreamConfig(new Configuration()), mock(Output.class));
wrapper.close();
wrapper.dispose();
verify(bolt).cleanup();
}
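The test verifies that disposing the wrapper forwards the shutdown to the wrapped bolt's cleanup(). As a rough illustration of the delegation being tested, here is a minimal sketch (a hypothetical class, not the actual BoltWrapper, which also manages output collectors and the topology context):

import org.apache.storm.topology.IRichBolt;

/** Hypothetical minimal delegate illustrating the contract testClose() checks:
 *  disposing the Flink-side operator must invoke the bolt's cleanup(). */
final class BoltLifecycleDelegate {
    private final IRichBolt bolt;
    private boolean disposed = false;

    BoltLifecycleDelegate(IRichBolt bolt) {
        this.bolt = bolt;
    }

    /** Mirrors what dispose() is expected to do: forward shutdown to Storm's lifecycle. */
    void dispose() {
        if (!disposed) { // guard against invoking cleanup() twice
            bolt.cleanup();
            disposed = true;
        }
    }
}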
Use of org.apache.storm.topology.IRichBolt in project flink by apache.
In class WrapperSetupHelper, method createTopologyContext.
/**
* Creates a {@link TopologyContext} for a Spout or Bolt instance (i.e., Flink task / Storm executor).
*
* @param context
* The Flink runtime context.
* @param spoutOrBolt
* The Spout or Bolt this context is created for.
* @param operatorName
* The name of the Spout or Bolt.
* @param stormTopology
* The original Storm topology.
* @param stormConfig
* The user provided configuration.
* @return The created {@link TopologyContext}.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
static synchronized TopologyContext createTopologyContext(final StreamingRuntimeContext context, final IComponent spoutOrBolt, final String operatorName, StormTopology stormTopology, final Map stormConfig) {
final int dop = context.getNumberOfParallelSubtasks();
final Map<Integer, String> taskToComponents = new HashMap<Integer, String>();
final Map<String, List<Integer>> componentToSortedTasks = new HashMap<String, List<Integer>>();
final Map<String, Map<String, Fields>> componentToStreamToFields = new HashMap<String, Map<String, Fields>>();
String stormId = (String) stormConfig.get(TOPOLOGY_NAME);
// not supported
String codeDir = null;
// not supported
String pidDir = null;
// task ID of this subtask; stays -1 in embedded mode, otherwise set via processSingleOperator() below
Integer taskId = -1;
// not supported
Integer workerPort = null;
List<Integer> workerTasks = new ArrayList<Integer>();
final Map<String, Object> defaultResources = new HashMap<String, Object>();
final Map<String, Object> userResources = new HashMap<String, Object>();
final Map<String, Object> executorData = new HashMap<String, Object>();
final Map registeredMetrics = new HashMap();
Atom openOrPrepareWasCalled = null;
if (stormTopology == null) {
// embedded mode
ComponentCommon common = new ComponentCommon();
common.set_parallelism_hint(dop);
HashMap<String, SpoutSpec> spouts = new HashMap<String, SpoutSpec>();
HashMap<String, Bolt> bolts = new HashMap<String, Bolt>();
if (spoutOrBolt instanceof IRichSpout) {
spouts.put(operatorName, new SpoutSpec(null, common));
} else {
assert (spoutOrBolt instanceof IRichBolt);
bolts.put(operatorName, new Bolt(null, common));
}
stormTopology = new StormTopology(spouts, bolts, new HashMap<String, StateSpoutSpec>());
List<Integer> sortedTasks = new ArrayList<Integer>(dop);
for (int i = 1; i <= dop; ++i) {
taskToComponents.put(i, operatorName);
sortedTasks.add(i);
}
componentToSortedTasks.put(operatorName, sortedTasks);
SetupOutputFieldsDeclarer declarer = new SetupOutputFieldsDeclarer();
spoutOrBolt.declareOutputFields(declarer);
componentToStreamToFields.put(operatorName, declarer.outputStreams);
} else {
// whole topology is built (i.e. FlinkTopology is used)
Map<String, SpoutSpec> spouts = stormTopology.get_spouts();
Map<String, Bolt> bolts = stormTopology.get_bolts();
Map<String, StateSpoutSpec> stateSpouts = stormTopology.get_state_spouts();
// reset the static task-ID counter of WrapperSetupHelper (field 'tid'), which processSingleOperator() advances to assign consecutive task IDs
tid = 1;
for (Entry<String, SpoutSpec> spout : spouts.entrySet()) {
Integer rc = processSingleOperator(spout.getKey(), spout.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
if (rc != null) {
taskId = rc;
}
}
for (Entry<String, Bolt> bolt : bolts.entrySet()) {
Integer rc = processSingleOperator(bolt.getKey(), bolt.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
if (rc != null) {
taskId = rc;
}
}
for (Entry<String, StateSpoutSpec> stateSpout : stateSpouts.entrySet()) {
Integer rc = processSingleOperator(stateSpout.getKey(), stateSpout.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
if (rc != null) {
taskId = rc;
}
}
assert (taskId != null);
}
if (!stormConfig.containsKey(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)) {
// Storm default value
stormConfig.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 30);
}
return new FlinkTopologyContext(stormTopology, stormConfig, taskToComponents, componentToSortedTasks, componentToStreamToFields, stormId, codeDir, pidDir, taskId, workerPort, workerTasks, defaultResources, userResources, executorData, registeredMetrics, openOrPrepareWasCalled);
}
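For context, a sketch of how this helper might be invoked (a hypothetical call site; in flink-storm the actual call happens inside the spout and bolt wrappers). Passing a null StormTopology selects the embedded-mode branch above; the topology name is illustrative:

@SuppressWarnings({ "rawtypes", "unchecked" })
void openWrappedBolt(final StreamingRuntimeContext flinkContext, final IRichBolt bolt, final String operatorName) {
    final Map stormConfig = new HashMap();
    stormConfig.put(WrapperSetupHelper.TOPOLOGY_NAME, "example-topology"); // illustrative name
    // null topology -> embedded mode: the helper synthesizes a single-component StormTopology
    final TopologyContext topologyContext = WrapperSetupHelper.createTopologyContext(
        flinkContext, bolt, operatorName, null, stormConfig);
    // the created context is exactly what the wrapped bolt sees during its lifecycle
    bolt.prepare(stormConfig, topologyContext, null); // collector omitted in this sketch
}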
Use of org.apache.storm.topology.IRichBolt in project flink by apache.
In class BoltWrapperTest, method testWrapper.
@SuppressWarnings({ "rawtypes", "unchecked" })
private void testWrapper(final int numberOfAttributes) throws Exception {
// -1 denotes a raw (non-Tuple) input type; Flink provides Tuple0 through Tuple25
assert ((-1 <= numberOfAttributes) && (numberOfAttributes <= 25));
Tuple flinkTuple = null;
String rawTuple = null;
if (numberOfAttributes == -1) {
rawTuple = "test";
} else {
flinkTuple = Tuple.getTupleClass(numberOfAttributes).newInstance();
}
final String[] schema;
if (numberOfAttributes == -1) {
schema = new String[1];
} else {
schema = new String[numberOfAttributes];
}
for (int i = 0; i < schema.length; ++i) {
schema[i] = "a" + i;
}
final StreamRecord record = mock(StreamRecord.class);
if (numberOfAttributes == -1) {
when(record.getValue()).thenReturn(rawTuple);
} else {
when(record.getValue()).thenReturn(flinkTuple);
}
final StreamingRuntimeContext taskContext = mock(StreamingRuntimeContext.class);
when(taskContext.getExecutionConfig()).thenReturn(mock(ExecutionConfig.class));
when(taskContext.getTaskName()).thenReturn("name");
when(taskContext.getMetricGroup()).thenReturn(new UnregisteredMetricsGroup());
final IRichBolt bolt = mock(IRichBolt.class);
final SetupOutputFieldsDeclarer declarer = new SetupOutputFieldsDeclarer();
declarer.declare(new Fields(schema));
PowerMockito.whenNew(SetupOutputFieldsDeclarer.class).withNoArguments().thenReturn(declarer);
final BoltWrapper wrapper = new BoltWrapper(bolt, (Fields) null);
wrapper.setup(createMockStreamTask(), new StreamConfig(new Configuration()), mock(Output.class));
wrapper.open();
wrapper.processElement(record);
if (numberOfAttributes == -1) {
verify(bolt).execute(eq(new StormTuple<String>(rawTuple, null, -1, null, null, MessageId.makeUnanchored())));
} else {
verify(bolt).execute(eq(new StormTuple<Tuple>(flinkTuple, null, -1, null, null, MessageId.makeUnanchored())));
}
}
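The arity convention exercised here: -1 stands for a raw (non-Tuple) input forwarded as a single-field Storm tuple, while 0 through 25 selects the corresponding Flink TupleN class, hence the assert range at the top of the method. A hypothetical helper making the convention explicit (it mirrors the branching at the start of testWrapper):

/** Hypothetical helper: builds a test input for a given attribute count. */
static Object createTestInput(final int numberOfAttributes) throws Exception {
    if (numberOfAttributes == -1) {
        // raw mode: any non-Tuple value is treated as a single-field tuple
        return "test";
    }
    // Tuple.getTupleClass(n) resolves the concrete TupleN class (n in [0, 25])
    return Tuple.getTupleClass(numberOfAttributes).newInstance();
}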
Use of org.apache.storm.topology.IRichBolt in project flink by apache.
In class WrapperSetupInLocalClusterTest, method testCreateTopologyContext.
@Test
public void testCreateTopologyContext() {
HashMap<String, Integer> dops = new HashMap<String, Integer>();
dops.put("spout1", 1);
dops.put("spout2", 3);
dops.put("bolt1", 1);
dops.put("bolt2", 2);
dops.put("sink", 1);
HashMap<String, Integer> taskCounter = new HashMap<String, Integer>();
taskCounter.put("spout1", 0);
taskCounter.put("spout2", 0);
taskCounter.put("bolt1", 0);
taskCounter.put("bolt2", 0);
taskCounter.put("sink", 0);
HashMap<String, IComponent> operators = new HashMap<String, IComponent>();
operators.put("spout1", new TestDummySpout());
operators.put("spout2", new TestDummySpout());
operators.put("bolt1", new TestDummyBolt());
operators.put("bolt2", new TestDummyBolt());
operators.put("sink", new TestSink());
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout1", (IRichSpout) operators.get("spout1"), dops.get("spout1"));
builder.setSpout("spout2", (IRichSpout) operators.get("spout2"), dops.get("spout2"));
builder.setBolt("bolt1", (IRichBolt) operators.get("bolt1"), dops.get("bolt1")).shuffleGrouping("spout1");
builder.setBolt("bolt2", (IRichBolt) operators.get("bolt2"), dops.get("bolt2")).allGrouping("spout2");
builder.setBolt("sink", (IRichBolt) operators.get("sink"), dops.get("sink")).shuffleGrouping("bolt1", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt1", TestDummyBolt.shuffleStreamId).shuffleGrouping("bolt2", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt2", TestDummyBolt.shuffleStreamId);
LocalCluster cluster = new LocalCluster();
Config c = new Config();
c.setNumAckers(0);
cluster.submitTopology("test", c, builder.createTopology());
// wait until every executor (1 + 3 + 1 + 2 + 1 = 8 tasks) has registered its TopologyContext
while (TestSink.result.size() != 8) {
Utils.sleep(100);
}
cluster.shutdown();
final FlinkTopology flinkBuilder = FlinkTopology.createTopology(builder);
StormTopology stormTopology = flinkBuilder.getStormTopology();
Set<Integer> taskIds = new HashSet<Integer>();
for (TopologyContext expectedContext : TestSink.result) {
final String thisComponentId = expectedContext.getThisComponentId();
int index = taskCounter.get(thisComponentId);
StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
when(context.getTaskName()).thenReturn(thisComponentId);
when(context.getNumberOfParallelSubtasks()).thenReturn(dops.get(thisComponentId));
when(context.getIndexOfThisSubtask()).thenReturn(index);
taskCounter.put(thisComponentId, ++index);
Config stormConfig = new Config();
stormConfig.put(WrapperSetupHelper.TOPOLOGY_NAME, "test");
TopologyContext topologyContext = WrapperSetupHelper.createTopologyContext(context, operators.get(thisComponentId), thisComponentId, stormTopology, stormConfig);
ComponentCommon expectedCommon = expectedContext.getComponentCommon(thisComponentId);
ComponentCommon common = topologyContext.getComponentCommon(thisComponentId);
Assert.assertNull(topologyContext.getCodeDir());
Assert.assertNull(common.get_json_conf());
Assert.assertNull(topologyContext.getExecutorData(null));
Assert.assertNull(topologyContext.getPIDDir());
Assert.assertNull(topologyContext.getResource(null));
Assert.assertNull(topologyContext.getSharedExecutor());
Assert.assertNull(expectedContext.getTaskData(null));
Assert.assertNull(topologyContext.getThisWorkerPort());
Assert.assertTrue(expectedContext.getStormId().startsWith(topologyContext.getStormId()));
Assert.assertEquals(expectedCommon.get_inputs(), common.get_inputs());
Assert.assertEquals(expectedCommon.get_parallelism_hint(), common.get_parallelism_hint());
Assert.assertEquals(expectedCommon.get_streams(), common.get_streams());
Assert.assertEquals(expectedContext.getComponentIds(), topologyContext.getComponentIds());
Assert.assertEquals(expectedContext.getComponentStreams(thisComponentId), topologyContext.getComponentStreams(thisComponentId));
Assert.assertEquals(thisComponentId, topologyContext.getThisComponentId());
Assert.assertEquals(expectedContext.getThisSources(), topologyContext.getThisSources());
Assert.assertEquals(expectedContext.getThisStreams(), topologyContext.getThisStreams());
Assert.assertEquals(expectedContext.getThisTargets(), topologyContext.getThisTargets());
Assert.assertEquals(0, topologyContext.getThisWorkerTasks().size());
for (int taskId : topologyContext.getComponentTasks(thisComponentId)) {
Assert.assertEquals(thisComponentId, topologyContext.getComponentId(taskId));
}
for (String componentId : expectedContext.getComponentIds()) {
Assert.assertEquals(expectedContext.getSources(componentId), topologyContext.getSources(componentId));
Assert.assertEquals(expectedContext.getTargets(componentId), topologyContext.getTargets(componentId));
for (String streamId : expectedContext.getComponentStreams(componentId)) {
Assert.assertEquals(expectedContext.getComponentOutputFields(componentId, streamId).toList(), topologyContext.getComponentOutputFields(componentId, streamId).toList());
}
}
for (String streamId : expectedContext.getThisStreams()) {
Assert.assertEquals(expectedContext.getThisOutputFields(streamId).toList(), topologyContext.getThisOutputFields(streamId).toList());
}
HashMap<Integer, String> taskToComponents = new HashMap<Integer, String>();
Set<Integer> allTaskIds = new HashSet<Integer>();
for (String componentId : expectedContext.getComponentIds()) {
List<Integer> possibleTasks = expectedContext.getComponentTasks(componentId);
List<Integer> tasks = topologyContext.getComponentTasks(componentId);
Iterator<Integer> p_it = possibleTasks.iterator();
Iterator<Integer> t_it = tasks.iterator();
while (p_it.hasNext()) {
Assert.assertTrue(t_it.hasNext());
Assert.assertNull(taskToComponents.put(p_it.next(), componentId));
Assert.assertTrue(allTaskIds.add(t_it.next()));
}
Assert.assertFalse(t_it.hasNext());
}
Assert.assertEquals(taskToComponents, expectedContext.getTaskToComponent());
Assert.assertTrue(taskIds.add(topologyContext.getThisTaskId()));
try {
topologyContext.getHooks();
Assert.fail();
} catch (UnsupportedOperationException e) {
/* expected */
}
try {
topologyContext.getRegisteredMetricByName(null);
Assert.fail();
} catch (UnsupportedOperationException e) {
/* expected */
}
}
}
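The two try/fail blocks pass because FlinkTopologyContext rejects Storm features that have no Flink equivalent. The overrides presumably look roughly like the following sketch inside FlinkTopologyContext (the method bodies and messages are assumptions; the throwing behavior itself is what the test above verifies):

@Override
public Collection<ITaskHook> getHooks() {
    // Storm task hooks have no Flink counterpart
    throw new UnsupportedOperationException("Task hooks are not supported by Flink");
}

@Override
public IMetric getRegisteredMetricByName(final String name) {
    // Storm's metric registry is not mapped onto Flink's metric system here
    throw new UnsupportedOperationException("Metrics are not supported by Flink");
}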
Use of org.apache.storm.topology.IRichBolt in project storm by apache.
In class LinearDRPCTopologyBuilder, method createTopology.
private StormTopology createTopology(DRPCSpout spout) {
final String SPOUT_ID = "spout";
final String PREPARE_ID = "prepare-request";
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(SPOUT_ID, spout);
builder.setBolt(PREPARE_ID, new PrepareRequest()).noneGrouping(SPOUT_ID);
// 'i' is declared outside the loop because it is reused below to name the JoinResult and ReturnResults bolts
int i = 0;
for (; i < _components.size(); i++) {
Component component = _components.get(i);
Map<String, SourceArgs> source = new HashMap<String, SourceArgs>();
if (i == 1) {
source.put(boltId(i - 1), SourceArgs.single());
} else if (i >= 2) {
source.put(boltId(i - 1), SourceArgs.all());
}
IdStreamSpec idSpec = null;
if (i == _components.size() - 1 && component.bolt instanceof FinishedCallback) {
idSpec = IdStreamSpec.makeDetectSpec(PREPARE_ID, PrepareRequest.ID_STREAM);
}
BoltDeclarer declarer = builder.setBolt(boltId(i), new CoordinatedBolt(component.bolt, source, idSpec), component.parallelism);
for (Map<String, Object> conf : component.componentConfs) {
declarer.addConfigurations(conf);
}
if (idSpec != null) {
declarer.fieldsGrouping(idSpec.getGlobalStreamId().get_componentId(), PrepareRequest.ID_STREAM, new Fields("request"));
}
if (i == 0 && component.declarations.isEmpty()) {
declarer.noneGrouping(PREPARE_ID, PrepareRequest.ARGS_STREAM);
} else {
String prevId;
if (i == 0) {
prevId = PREPARE_ID;
} else {
prevId = boltId(i - 1);
}
for (InputDeclaration declaration : component.declarations) {
declaration.declare(prevId, declarer);
}
}
if (i > 0) {
declarer.directGrouping(boltId(i - 1), Constants.COORDINATED_STREAM_ID);
}
}
IRichBolt lastBolt = _components.get(_components.size() - 1).bolt;
OutputFieldsGetter getter = new OutputFieldsGetter();
lastBolt.declareOutputFields(getter);
Map<String, StreamInfo> streams = getter.getFieldsDeclaration();
if (streams.size() != 1) {
throw new RuntimeException("Must declare exactly one stream from last bolt in LinearDRPCTopology");
}
String outputStream = streams.keySet().iterator().next();
List<String> fields = streams.get(outputStream).get_output_fields();
if (fields.size() != 2) {
throw new RuntimeException("Output stream of last component in LinearDRPCTopology must contain exactly two fields. The first should be the request id, and the second should be the result.");
}
builder.setBolt(boltId(i), new JoinResult(PREPARE_ID)).fieldsGrouping(boltId(i - 1), outputStream, new Fields(fields.get(0))).fieldsGrouping(PREPARE_ID, PrepareRequest.RETURN_STREAM, new Fields("request"));
i++;
builder.setBolt(boltId(i), new ReturnResults()).noneGrouping(boltId(i - 1));
return builder.createTopology();
}
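For reference, typical usage of this builder, adapted from the Storm DRPC documentation (ExclamationBolt is a placeholder for any basic bolt whose single output stream carries exactly [request-id, result]; exception handling is omitted):

LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
builder.addBolt(new ExclamationBolt(), 3); // placeholder bolt; must emit [request-id, result]

Config conf = new Config();
LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
// createLocalTopology(drpc) wires the DRPC spout to the in-process DRPC server
cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
System.out.println("hello -> " + drpc.execute("exclamation", "hello"));
cluster.shutdown();
drpc.shutdown();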