Use of org.apache.storm.generated.StormTopology in project storm by apache.
From the class TestResourceAwareScheduler, method testMemoryLoadLargerThanMaxHeapSize:
@Test(expected = IllegalArgumentException.class)
public void testMemoryLoadLargerThanMaxHeapSize() throws Exception {
// Topology will not be able to be successfully scheduled: Config TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB=128.0 < 129.0
// (the largest memory requirement of a component in the topology).
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout1", new TestWordSpout(), 4);
StormTopology stormTopology1 = builder1.createTopology();
Config config1 = new Config();
config1.putAll(defaultTopologyConf);
config1.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 128.0);
config1.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 129.0);
StormSubmitter.submitTopologyWithProgressBar("test", config1, stormTopology1);
}
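For contrast, a minimal sketch of a configuration the resource-aware scheduler would accept, where the per-component on-heap request stays at or below TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB; the component name, topology name, and memory figures below are illustrative and not taken from the test above.
TopologyBuilder builder2 = new TopologyBuilder();
// request 64 MB of on-heap memory per executor of this spout, below the 128 MB worker heap cap
builder2.setSpout("wordSpout2", new TestWordSpout(), 4).setMemoryLoad(64.0);
StormTopology stormTopology2 = builder2.createTopology();
Config config2 = new Config();
config2.putAll(defaultTopologyConf);
config2.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 128.0);
// this submission is expected to pass the max-heap-size check that the test above deliberately violates
StormSubmitter.submitTopologyWithProgressBar("test-ok", config2, stormTopology2);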
Use of org.apache.storm.generated.StormTopology in project flink by apache.
From the class WrapperSetupHelper, method createTopologyContext:
/**
* Creates a {@link TopologyContext} for a Spout or Bolt instance (i.e., a Flink task / Storm executor).
*
* @param context
* The Flink runtime context.
* @param spoutOrBolt
* The Spout or Bolt this context is created for.
* @param operatorName
* The name of the operator, i.e., the Storm component ID.
* @param stormTopology
* The original Storm topology.
* @param stormConfig
* The user provided configuration.
* @return The created {@link TopologyContext}.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
static synchronized TopologyContext createTopologyContext(final StreamingRuntimeContext context, final IComponent spoutOrBolt, final String operatorName, StormTopology stormTopology, final Map stormConfig) {
final int dop = context.getNumberOfParallelSubtasks();
final Map<Integer, String> taskToComponents = new HashMap<Integer, String>();
final Map<String, List<Integer>> componentToSortedTasks = new HashMap<String, List<Integer>>();
final Map<String, Map<String, Fields>> componentToStreamToFields = new HashMap<String, Map<String, Fields>>();
String stormId = (String) stormConfig.get(TOPOLOGY_NAME);
// not supported
String codeDir = null;
// not supported
String pidDir = null;
Integer taskId = -1;
// not supported
Integer workerPort = null;
List<Integer> workerTasks = new ArrayList<Integer>();
final Map<String, Object> defaultResources = new HashMap<String, Object>();
final Map<String, Object> userResources = new HashMap<String, Object>();
final Map<String, Object> executorData = new HashMap<String, Object>();
final Map registeredMetrics = new HashMap();
Atom openOrPrepareWasCalled = null;
if (stormTopology == null) {
// embedded mode
ComponentCommon common = new ComponentCommon();
common.set_parallelism_hint(dop);
HashMap<String, SpoutSpec> spouts = new HashMap<String, SpoutSpec>();
HashMap<String, Bolt> bolts = new HashMap<String, Bolt>();
if (spoutOrBolt instanceof IRichSpout) {
spouts.put(operatorName, new SpoutSpec(null, common));
} else {
assert (spoutOrBolt instanceof IRichBolt);
bolts.put(operatorName, new Bolt(null, common));
}
stormTopology = new StormTopology(spouts, bolts, new HashMap<String, StateSpoutSpec>());
List<Integer> sortedTasks = new ArrayList<Integer>(dop);
for (int i = 1; i <= dop; ++i) {
taskToComponents.put(i, operatorName);
sortedTasks.add(i);
}
componentToSortedTasks.put(operatorName, sortedTasks);
SetupOutputFieldsDeclarer declarer = new SetupOutputFieldsDeclarer();
spoutOrBolt.declareOutputFields(declarer);
componentToStreamToFields.put(operatorName, declarer.outputStreams);
} else {
// whole topology is built (i.e. FlinkTopology is used)
Map<String, SpoutSpec> spouts = stormTopology.get_spouts();
Map<String, Bolt> bolts = stormTopology.get_bolts();
Map<String, StateSpoutSpec> stateSpouts = stormTopology.get_state_spouts();
// reset the task-id counter (a static field of WrapperSetupHelper) that processSingleOperator uses to assign task IDs
tid = 1;
for (Entry<String, SpoutSpec> spout : spouts.entrySet()) {
Integer rc = processSingleOperator(spout.getKey(), spout.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
if (rc != null) {
taskId = rc;
}
}
for (Entry<String, Bolt> bolt : bolts.entrySet()) {
Integer rc = processSingleOperator(bolt.getKey(), bolt.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
if (rc != null) {
taskId = rc;
}
}
for (Entry<String, StateSpoutSpec> stateSpout : stateSpouts.entrySet()) {
Integer rc = processSingleOperator(stateSpout.getKey(), stateSpout.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
if (rc != null) {
taskId = rc;
}
}
assert (taskId != null);
}
if (!stormConfig.containsKey(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)) {
// Storm default value
stormConfig.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 30);
}
return new FlinkTopologyContext(stormTopology, stormConfig, taskToComponents, componentToSortedTasks, componentToStreamToFields, stormId, codeDir, pidDir, taskId, workerPort, workerTasks, defaultResources, userResources, executorData, registeredMetrics, openOrPrepareWasCalled);
}
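A minimal sketch of how a Flink-side wrapper might call this method when opening the wrapped component; the names flinkContext, wrappedSpout, componentName, origTopology, config, and collector are illustrative placeholders, not taken from the Flink sources.
StreamingRuntimeContext flinkContext = (StreamingRuntimeContext) getRuntimeContext();
// build a Storm-compatible context for this subtask (origTopology may be null in embedded mode)
TopologyContext topologyContext = WrapperSetupHelper.createTopologyContext(flinkContext, wrappedSpout, componentName, origTopology, config);
// hand the context to the wrapped spout exactly as Storm would on worker start-up
wrappedSpout.open(config, topologyContext, new SpoutOutputCollector(collector));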
Use of org.apache.storm.generated.StormTopology in project flink by apache.
From the class WrapperSetupInLocalClusterTest, method testCreateTopologyContext:
@Test
public void testCreateTopologyContext() {
HashMap<String, Integer> dops = new HashMap<String, Integer>();
dops.put("spout1", 1);
dops.put("spout2", 3);
dops.put("bolt1", 1);
dops.put("bolt2", 2);
dops.put("sink", 1);
HashMap<String, Integer> taskCounter = new HashMap<String, Integer>();
taskCounter.put("spout1", 0);
taskCounter.put("spout2", 0);
taskCounter.put("bolt1", 0);
taskCounter.put("bolt2", 0);
taskCounter.put("sink", 0);
HashMap<String, IComponent> operators = new HashMap<String, IComponent>();
operators.put("spout1", new TestDummySpout());
operators.put("spout2", new TestDummySpout());
operators.put("bolt1", new TestDummyBolt());
operators.put("bolt2", new TestDummyBolt());
operators.put("sink", new TestSink());
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout1", (IRichSpout) operators.get("spout1"), dops.get("spout1"));
builder.setSpout("spout2", (IRichSpout) operators.get("spout2"), dops.get("spout2"));
builder.setBolt("bolt1", (IRichBolt) operators.get("bolt1"), dops.get("bolt1")).shuffleGrouping("spout1");
builder.setBolt("bolt2", (IRichBolt) operators.get("bolt2"), dops.get("bolt2")).allGrouping("spout2");
builder.setBolt("sink", (IRichBolt) operators.get("sink"), dops.get("sink")).shuffleGrouping("bolt1", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt1", TestDummyBolt.shuffleStreamId).shuffleGrouping("bolt2", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt2", TestDummyBolt.shuffleStreamId);
LocalCluster cluster = new LocalCluster();
Config c = new Config();
c.setNumAckers(0);
cluster.submitTopology("test", c, builder.createTopology());
while (TestSink.result.size() != 8) {
Utils.sleep(100);
}
cluster.shutdown();
final FlinkTopology flinkBuilder = FlinkTopology.createTopology(builder);
StormTopology stormTopology = flinkBuilder.getStormTopology();
Set<Integer> taskIds = new HashSet<Integer>();
for (TopologyContext expectedContext : TestSink.result) {
final String thisComponentId = expectedContext.getThisComponentId();
int index = taskCounter.get(thisComponentId);
StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
when(context.getTaskName()).thenReturn(thisComponentId);
when(context.getNumberOfParallelSubtasks()).thenReturn(dops.get(thisComponentId));
when(context.getIndexOfThisSubtask()).thenReturn(index);
taskCounter.put(thisComponentId, ++index);
Config stormConfig = new Config();
stormConfig.put(WrapperSetupHelper.TOPOLOGY_NAME, "test");
TopologyContext topologyContext = WrapperSetupHelper.createTopologyContext(context, operators.get(thisComponentId), thisComponentId, stormTopology, stormConfig);
ComponentCommon expectedCommon = expectedContext.getComponentCommon(thisComponentId);
ComponentCommon common = topologyContext.getComponentCommon(thisComponentId);
Assert.assertNull(topologyContext.getCodeDir());
Assert.assertNull(common.get_json_conf());
Assert.assertNull(topologyContext.getExecutorData(null));
Assert.assertNull(topologyContext.getPIDDir());
Assert.assertNull(topologyContext.getResource(null));
Assert.assertNull(topologyContext.getSharedExecutor());
Assert.assertNull(expectedContext.getTaskData(null));
Assert.assertNull(topologyContext.getThisWorkerPort());
Assert.assertTrue(expectedContext.getStormId().startsWith(topologyContext.getStormId()));
Assert.assertEquals(expectedCommon.get_inputs(), common.get_inputs());
Assert.assertEquals(expectedCommon.get_parallelism_hint(), common.get_parallelism_hint());
Assert.assertEquals(expectedCommon.get_streams(), common.get_streams());
Assert.assertEquals(expectedContext.getComponentIds(), topologyContext.getComponentIds());
Assert.assertEquals(expectedContext.getComponentStreams(thisComponentId), topologyContext.getComponentStreams(thisComponentId));
Assert.assertEquals(thisComponentId, topologyContext.getThisComponentId());
Assert.assertEquals(expectedContext.getThisSources(), topologyContext.getThisSources());
Assert.assertEquals(expectedContext.getThisStreams(), topologyContext.getThisStreams());
Assert.assertEquals(expectedContext.getThisTargets(), topologyContext.getThisTargets());
Assert.assertEquals(0, topologyContext.getThisWorkerTasks().size());
for (int taskId : topologyContext.getComponentTasks(thisComponentId)) {
Assert.assertEquals(thisComponentId, topologyContext.getComponentId(taskId));
}
for (String componentId : expectedContext.getComponentIds()) {
Assert.assertEquals(expectedContext.getSources(componentId), topologyContext.getSources(componentId));
Assert.assertEquals(expectedContext.getTargets(componentId), topologyContext.getTargets(componentId));
for (String streamId : expectedContext.getComponentStreams(componentId)) {
Assert.assertEquals(expectedContext.getComponentOutputFields(componentId, streamId).toList(), topologyContext.getComponentOutputFields(componentId, streamId).toList());
}
}
for (String streamId : expectedContext.getThisStreams()) {
Assert.assertEquals(expectedContext.getThisOutputFields(streamId).toList(), topologyContext.getThisOutputFields(streamId).toList());
}
HashMap<Integer, String> taskToComponents = new HashMap<Integer, String>();
Set<Integer> allTaskIds = new HashSet<Integer>();
for (String componentId : expectedContext.getComponentIds()) {
List<Integer> possibleTasks = expectedContext.getComponentTasks(componentId);
List<Integer> tasks = topologyContext.getComponentTasks(componentId);
Iterator<Integer> p_it = possibleTasks.iterator();
Iterator<Integer> t_it = tasks.iterator();
while (p_it.hasNext()) {
Assert.assertTrue(t_it.hasNext());
Assert.assertNull(taskToComponents.put(p_it.next(), componentId));
Assert.assertTrue(allTaskIds.add(t_it.next()));
}
Assert.assertFalse(t_it.hasNext());
}
Assert.assertEquals(taskToComponents, expectedContext.getTaskToComponent());
Assert.assertTrue(taskIds.add(topologyContext.getThisTaskId()));
try {
topologyContext.getHooks();
Assert.fail();
} catch (UnsupportedOperationException e) {
/* expected */
}
try {
topologyContext.getRegisteredMetricByName(null);
Assert.fail();
} catch (UnsupportedOperationException e) {
/* expected */
}
}
}
Use of org.apache.storm.generated.StormTopology in project storm by apache.
From the class Zookeeper, method leaderLatchListenerImpl:
// Leader latch listener that will be invoked when we either gain or lose leadership
public static LeaderLatchListener leaderLatchListenerImpl(final Map conf, final CuratorFramework zk, final BlobStore blobStore, final LeaderLatch leaderLatch) throws UnknownHostException {
final String hostName = InetAddress.getLocalHost().getCanonicalHostName();
return new LeaderLatchListener() {
final String STORM_JAR_SUFFIX = "-stormjar.jar";
final String STORM_CODE_SUFFIX = "-stormcode.ser";
final String STORM_CONF_SUFFIX = "-stormconf.ser";
@Override
public void isLeader() {
Set<String> activeTopologyIds = new TreeSet<>(Zookeeper.getChildren(zk, conf.get(Config.STORM_ZOOKEEPER_ROOT) + ClusterUtils.STORMS_SUBTREE, false));
Set<String> activeTopologyBlobKeys = populateTopologyBlobKeys(activeTopologyIds);
Set<String> activeTopologyCodeKeys = filterTopologyCodeKeys(activeTopologyBlobKeys);
Set<String> allLocalBlobKeys = Sets.newHashSet(blobStore.listKeys());
Set<String> allLocalTopologyBlobKeys = filterTopologyBlobKeys(allLocalBlobKeys);
// find the active-topology blob keys that are missing from the local blob store
Sets.SetView<String> diffTopology = Sets.difference(activeTopologyBlobKeys, allLocalTopologyBlobKeys);
LOG.info("active-topology-blobs [{}] local-topology-blobs [{}] diff-topology-blobs [{}]", generateJoinedString(activeTopologyIds), generateJoinedString(allLocalTopologyBlobKeys), generateJoinedString(diffTopology));
if (diffTopology.isEmpty()) {
Set<String> activeTopologyDependencies = getTopologyDependencyKeys(activeTopologyCodeKeys);
// find the dependency blob keys of active topologies that are missing from the local blob store
Sets.SetView<String> diffDependencies = Sets.difference(activeTopologyDependencies, allLocalBlobKeys);
LOG.info("active-topology-dependencies [{}] local-blobs [{}] diff-topology-dependencies [{}]", generateJoinedString(activeTopologyDependencies), generateJoinedString(allLocalBlobKeys), generateJoinedString(diffDependencies));
if (diffDependencies.isEmpty()) {
LOG.info("Accepting leadership, all active topologies and corresponding dependencies found locally.");
} else {
LOG.info("Code for all active topologies is available locally, but some dependencies are not found locally, giving up leadership.");
closeLatch();
}
} else {
LOG.info("code for all active topologies not available locally, giving up leadership.");
closeLatch();
}
}
@Override
public void notLeader() {
LOG.info("{} lost leadership.", hostName);
}
private String generateJoinedString(Set<String> activeTopologyIds) {
return Joiner.on(",").join(activeTopologyIds);
}
private Set<String> populateTopologyBlobKeys(Set<String> activeTopologyIds) {
Set<String> activeTopologyBlobKeys = new TreeSet<>();
for (String activeTopologyId : activeTopologyIds) {
activeTopologyBlobKeys.add(activeTopologyId + STORM_JAR_SUFFIX);
activeTopologyBlobKeys.add(activeTopologyId + STORM_CODE_SUFFIX);
activeTopologyBlobKeys.add(activeTopologyId + STORM_CONF_SUFFIX);
}
return activeTopologyBlobKeys;
}
private Set<String> filterTopologyBlobKeys(Set<String> blobKeys) {
Set<String> topologyBlobKeys = new HashSet<>();
for (String blobKey : blobKeys) {
if (blobKey.endsWith(STORM_JAR_SUFFIX) || blobKey.endsWith(STORM_CODE_SUFFIX) || blobKey.endsWith(STORM_CONF_SUFFIX)) {
topologyBlobKeys.add(blobKey);
}
}
return topologyBlobKeys;
}
private Set<String> filterTopologyCodeKeys(Set<String> blobKeys) {
Set<String> topologyCodeKeys = new HashSet<>();
for (String blobKey : blobKeys) {
if (blobKey.endsWith(STORM_CODE_SUFFIX)) {
topologyCodeKeys.add(blobKey);
}
}
return topologyCodeKeys;
}
private Set<String> getTopologyDependencyKeys(Set<String> activeTopologyCodeKeys) {
Set<String> activeTopologyDependencies = new TreeSet<>();
Subject subject = ReqContext.context().subject();
for (String activeTopologyCodeKey : activeTopologyCodeKeys) {
try {
InputStreamWithMeta blob = blobStore.getBlob(activeTopologyCodeKey, subject);
byte[] blobContent = IOUtils.readFully(blob, new Long(blob.getFileLength()).intValue());
StormTopology stormCode = Utils.deserialize(blobContent, StormTopology.class);
if (stormCode.is_set_dependency_jars()) {
activeTopologyDependencies.addAll(stormCode.get_dependency_jars());
}
if (stormCode.is_set_dependency_artifacts()) {
activeTopologyDependencies.addAll(stormCode.get_dependency_artifacts());
}
} catch (AuthorizationException | KeyNotFoundException | IOException e) {
LOG.error("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
throw new RuntimeException("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
}
}
return activeTopologyDependencies;
}
private void closeLatch() {
try {
leaderLatch.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
};
}
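A minimal sketch of how this listener would typically be registered with a Curator LeaderLatch before joining the leader election; the latch path is illustrative and checked exceptions are omitted.
LeaderLatch leaderLatch = new LeaderLatch(zk, conf.get(Config.STORM_ZOOKEEPER_ROOT) + "/leader-lock");
// register the callbacks so isLeader()/notLeader() fire on leadership changes
leaderLatch.addListener(Zookeeper.leaderLatchListenerImpl(conf, zk, blobStore, leaderLatch));
leaderLatch.start();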
Use of org.apache.storm.generated.StormTopology in project ignite by apache.
From the class StormIgniteStreamerSelfTest, method testStormStreamerIgniteBolt:
/**
* Tests the streamer bolt. Ignite is started inside the bolt, based on what is specified in the configuration file.
*
* @throws TimeoutException
* @throws InterruptedException
*/
public void testStormStreamerIgniteBolt() throws TimeoutException, InterruptedException {
final StormStreamer<String, String> stormStreamer = new StormStreamer<>();
stormStreamer.setAutoFlushFrequency(10L);
stormStreamer.setAllowOverwrite(true);
stormStreamer.setCacheName(TEST_CACHE);
stormStreamer.setIgniteTupleField(TestStormSpout.IGNITE_TUPLE_FIELD);
stormStreamer.setIgniteConfigFile(GRID_CONF_FILE);
Config daemonConf = new Config();
daemonConf.put(Config.STORM_LOCAL_MODE_ZMQ, false);
MkClusterParam mkClusterParam = new MkClusterParam();
mkClusterParam.setDaemonConf(daemonConf);
mkClusterParam.setSupervisors(4);
final CountDownLatch latch = new CountDownLatch(TestStormSpout.CNT);
IgniteBiPredicate<UUID, CacheEvent> putLsnr = new IgniteBiPredicate<UUID, CacheEvent>() {
@Override
public boolean apply(UUID uuid, CacheEvent evt) {
assert evt != null;
latch.countDown();
return true;
}
};
final UUID putLsnrId = ignite.events(ignite.cluster().forCacheNodes(TEST_CACHE)).remoteListen(putLsnr, null, EVT_CACHE_OBJECT_PUT);
Testing.withSimulatedTimeLocalCluster(mkClusterParam, new TestJob() {
@Override
public void run(ILocalCluster cluster) throws IOException, InterruptedException {
// Creates a test topology.
TopologyBuilder builder = new TopologyBuilder();
TestStormSpout testStormSpout = new TestStormSpout();
builder.setSpout("test-spout", testStormSpout);
builder.setBolt("ignite-bolt", stormStreamer, STORM_EXECUTORS).shuffleGrouping("test-spout");
StormTopology topology = builder.createTopology();
// Prepares a mock data for the spout.
MockedSources mockedSources = new MockedSources();
mockedSources.addMockData("test-spout", getMockData());
// Prepares the config.
Config conf = new Config();
conf.setMessageTimeoutSecs(10);
IgniteCache<Integer, String> cache = ignite.cache(TEST_CACHE);
CompleteTopologyParam completeTopologyParam = new CompleteTopologyParam();
completeTopologyParam.setTimeoutMs(10000);
completeTopologyParam.setMockedSources(mockedSources);
completeTopologyParam.setStormConf(conf);
// Checks the cache doesn't contain any entries yet.
assertEquals(0, cache.size(CachePeekMode.PRIMARY));
Testing.completeTopology(cluster, topology, completeTopologyParam);
// Checks that all events are successfully processed within 10 seconds.
assertTrue(latch.await(10, TimeUnit.SECONDS));
ignite.events(ignite.cluster().forCacheNodes(TEST_CACHE)).stopRemoteListen(putLsnrId);
// Validates all entries are in the cache.
assertEquals(TestStormSpout.CNT, cache.size(CachePeekMode.PRIMARY));
for (Map.Entry<Integer, String> entry : TestStormSpout.getKeyValMap().entrySet()) assertEquals(entry.getValue(), cache.get(entry.getKey()));
}
});
}
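As a follow-up sketch only (not part of the Ignite test above), Testing.completeTopology also returns the tuples captured per component, which can be inspected with Testing.readTuples; the exact contents depend on getMockData().
Map result = Testing.completeTopology(cluster, topology, completeTopologyParam);
// tuples emitted by the mocked spout on its default stream
List spoutTuples = Testing.readTuples(result, "test-spout");
assertFalse(spoutTuples.isEmpty());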