Example 86 with StormTopology

use of org.apache.storm.generated.StormTopology in project storm by apache.

the class TestResourceAwareScheduler method testMemoryLoadLargerThanMaxHeapSize.

@Test(expected = IllegalArgumentException.class)
public void testMemoryLoadLargerThanMaxHeapSize() throws Exception {
    // Topology will not be able to be successfully scheduled: Config
    // TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB = 128.0 < 129.0 (the largest memory
    // requirement of a component in the topology).
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout1", new TestWordSpout(), 4);
    StormTopology stormTopology1 = builder1.createTopology();
    Config config1 = new Config();
    config1.putAll(defaultTopologyConf);
    config1.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 128.0);
    config1.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 129.0);
    StormSubmitter.submitTopologyWithProgressBar("test", config1, stormTopology1);
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) TestWordSpout(org.apache.storm.testing.TestWordSpout) Test(org.junit.Test)
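
The expected IllegalArgumentException is raised during submit-time validation: the largest per-component on-heap request (129.0 MB) exceeds TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB (128.0 MB), so no worker can host that component. The same constraint can also be hit through the per-component resource API; a minimal sketch, assuming Storm's standard setMemoryLoad declarer method:

// Sketch only: request 129 MB on-heap per executor, exceeding the 128 MB worker cap above.
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("wordSpout1", new TestWordSpout(), 4).setMemoryLoad(129.0);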

Example 87 with StormTopology

use of org.apache.storm.generated.StormTopology in project flink by apache.

the class WrapperSetupHelper method createTopologyContext.

/**
	 * Creates a {@link TopologyContext} for a Spout or Bolt instance (i.e., a Flink task / Storm executor).
	 * 
	 * @param context
	 *            The Flink runtime context.
	 * @param spoutOrBolt
	 *            The Spout or Bolt this context is created for.
	 * @param operatorName
	 *            The name of the operator (used as the component ID).
	 * @param stormTopology
	 *            The original Storm topology.
	 * @param stormConfig
	 *            The user provided configuration.
	 * @return The created {@link TopologyContext}.
	 */
@SuppressWarnings({ "rawtypes", "unchecked" })
static synchronized TopologyContext createTopologyContext(final StreamingRuntimeContext context, final IComponent spoutOrBolt, final String operatorName, StormTopology stormTopology, final Map stormConfig) {
    final int dop = context.getNumberOfParallelSubtasks();
    final Map<Integer, String> taskToComponents = new HashMap<Integer, String>();
    final Map<String, List<Integer>> componentToSortedTasks = new HashMap<String, List<Integer>>();
    final Map<String, Map<String, Fields>> componentToStreamToFields = new HashMap<String, Map<String, Fields>>();
    String stormId = (String) stormConfig.get(TOPOLOGY_NAME);
    // not supported
    String codeDir = null;
    // not supported
    String pidDir = null;
    Integer taskId = -1;
    // not supported
    Integer workerPort = null;
    List<Integer> workerTasks = new ArrayList<Integer>();
    final Map<String, Object> defaultResources = new HashMap<String, Object>();
    final Map<String, Object> userResources = new HashMap<String, Object>();
    final Map<String, Object> executorData = new HashMap<String, Object>();
    final Map registeredMetrics = new HashMap();
    Atom openOrPrepareWasCalled = null;
    if (stormTopology == null) {
        // embedded mode
        ComponentCommon common = new ComponentCommon();
        common.set_parallelism_hint(dop);
        HashMap<String, SpoutSpec> spouts = new HashMap<String, SpoutSpec>();
        HashMap<String, Bolt> bolts = new HashMap<String, Bolt>();
        if (spoutOrBolt instanceof IRichSpout) {
            spouts.put(operatorName, new SpoutSpec(null, common));
        } else {
            assert (spoutOrBolt instanceof IRichBolt);
            bolts.put(operatorName, new Bolt(null, common));
        }
        stormTopology = new StormTopology(spouts, bolts, new HashMap<String, StateSpoutSpec>());
        List<Integer> sortedTasks = new ArrayList<Integer>(dop);
        for (int i = 1; i <= dop; ++i) {
            taskToComponents.put(i, operatorName);
            sortedTasks.add(i);
        }
        componentToSortedTasks.put(operatorName, sortedTasks);
        SetupOutputFieldsDeclarer declarer = new SetupOutputFieldsDeclarer();
        spoutOrBolt.declareOutputFields(declarer);
        componentToStreamToFields.put(operatorName, declarer.outputStreams);
    } else {
        // whole topology is built (i.e. FlinkTopology is used)
        Map<String, SpoutSpec> spouts = stormTopology.get_spouts();
        Map<String, Bolt> bolts = stormTopology.get_bolts();
        Map<String, StateSpoutSpec> stateSpouts = stormTopology.get_state_spouts();
        // reset the static task-ID counter shared with processSingleOperator()
        tid = 1;
        for (Entry<String, SpoutSpec> spout : spouts.entrySet()) {
            Integer rc = processSingleOperator(spout.getKey(), spout.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
            if (rc != null) {
                taskId = rc;
            }
        }
        for (Entry<String, Bolt> bolt : bolts.entrySet()) {
            Integer rc = processSingleOperator(bolt.getKey(), bolt.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
            if (rc != null) {
                taskId = rc;
            }
        }
        for (Entry<String, StateSpoutSpec> stateSpout : stateSpouts.entrySet()) {
            Integer rc = processSingleOperator(stateSpout.getKey(), stateSpout.getValue().get_common(), operatorName, context.getIndexOfThisSubtask(), dop, taskToComponents, componentToSortedTasks, componentToStreamToFields);
            if (rc != null) {
                taskId = rc;
            }
        }
        assert (taskId != null);
    }
    if (!stormConfig.containsKey(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)) {
        // Storm default value
        stormConfig.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 30);
    }
    return new FlinkTopologyContext(stormTopology, stormConfig, taskToComponents, componentToSortedTasks, componentToStreamToFields, stormId, codeDir, pidDir, taskId, workerPort, workerTasks, defaultResources, userResources, executorData, registeredMetrics, openOrPrepareWasCalled);
}
Also used : HashMap(java.util.HashMap) StormTopology(org.apache.storm.generated.StormTopology) ArrayList(java.util.ArrayList) StateSpoutSpec(org.apache.storm.generated.StateSpoutSpec) ArrayList(java.util.ArrayList) List(java.util.List) ComponentCommon(org.apache.storm.generated.ComponentCommon) IRichBolt(org.apache.storm.topology.IRichBolt) Bolt(org.apache.storm.generated.Bolt) IRichBolt(org.apache.storm.topology.IRichBolt) Atom(clojure.lang.Atom) Fields(org.apache.storm.tuple.Fields) IRichSpout(org.apache.storm.topology.IRichSpout) StateSpoutSpec(org.apache.storm.generated.StateSpoutSpec) SpoutSpec(org.apache.storm.generated.SpoutSpec) HashMap(java.util.HashMap) Map(java.util.Map)
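
For the embedded-mode branch (stormTopology == null), a caller builds the context for a single wrapped operator. A hypothetical invocation, assuming a StreamingRuntimeContext obtained (or mocked) from the Flink runtime as the test in the next example does; all names are illustrative:

// Hypothetical call exercising the embedded-mode branch; createTopologyContext
// is package-private, so this must run from the same package as WrapperSetupHelper.
Map stormConfig = new HashMap();
stormConfig.put(WrapperSetupHelper.TOPOLOGY_NAME, "embedded-test");
TopologyContext ctx = WrapperSetupHelper.createTopologyContext(
    runtimeContext,        // a (mocked) StreamingRuntimeContext
    new TestDummySpout(),  // the wrapped Spout
    "dummy-spout",         // operator name, used as the component ID
    null,                  // no StormTopology -> embedded mode
    stormConfig);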

Example 88 with StormTopology

use of org.apache.storm.generated.StormTopology in project flink by apache.

the class WrapperSetupInLocalClusterTest method testCreateTopologyContext.

@Test
public void testCreateTopologyContext() {
    HashMap<String, Integer> dops = new HashMap<String, Integer>();
    dops.put("spout1", 1);
    dops.put("spout2", 3);
    dops.put("bolt1", 1);
    dops.put("bolt2", 2);
    dops.put("sink", 1);
    HashMap<String, Integer> taskCounter = new HashMap<String, Integer>();
    taskCounter.put("spout1", 0);
    taskCounter.put("spout2", 0);
    taskCounter.put("bolt1", 0);
    taskCounter.put("bolt2", 0);
    taskCounter.put("sink", 0);
    HashMap<String, IComponent> operators = new HashMap<String, IComponent>();
    operators.put("spout1", new TestDummySpout());
    operators.put("spout2", new TestDummySpout());
    operators.put("bolt1", new TestDummyBolt());
    operators.put("bolt2", new TestDummyBolt());
    operators.put("sink", new TestSink());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout1", (IRichSpout) operators.get("spout1"), dops.get("spout1"));
    builder.setSpout("spout2", (IRichSpout) operators.get("spout2"), dops.get("spout2"));
    builder.setBolt("bolt1", (IRichBolt) operators.get("bolt1"), dops.get("bolt1")).shuffleGrouping("spout1");
    builder.setBolt("bolt2", (IRichBolt) operators.get("bolt2"), dops.get("bolt2")).allGrouping("spout2");
    builder.setBolt("sink", (IRichBolt) operators.get("sink"), dops.get("sink")).shuffleGrouping("bolt1", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt1", TestDummyBolt.shuffleStreamId).shuffleGrouping("bolt2", TestDummyBolt.groupingStreamId).shuffleGrouping("bolt2", TestDummyBolt.shuffleStreamId);
    LocalCluster cluster = new LocalCluster();
    Config c = new Config();
    c.setNumAckers(0);
    cluster.submitTopology("test", c, builder.createTopology());
    while (TestSink.result.size() != 8) {
        Utils.sleep(100);
    }
    cluster.shutdown();
    final FlinkTopology flinkBuilder = FlinkTopology.createTopology(builder);
    StormTopology stormTopology = flinkBuilder.getStormTopology();
    Set<Integer> taskIds = new HashSet<Integer>();
    for (TopologyContext expectedContext : TestSink.result) {
        final String thisComponentId = expectedContext.getThisComponentId();
        int index = taskCounter.get(thisComponentId);
        StreamingRuntimeContext context = mock(StreamingRuntimeContext.class);
        when(context.getTaskName()).thenReturn(thisComponentId);
        when(context.getNumberOfParallelSubtasks()).thenReturn(dops.get(thisComponentId));
        when(context.getIndexOfThisSubtask()).thenReturn(index);
        taskCounter.put(thisComponentId, ++index);
        Config stormConfig = new Config();
        stormConfig.put(WrapperSetupHelper.TOPOLOGY_NAME, "test");
        TopologyContext topologyContext = WrapperSetupHelper.createTopologyContext(context, operators.get(thisComponentId), thisComponentId, stormTopology, stormConfig);
        ComponentCommon expectedCommon = expectedContext.getComponentCommon(thisComponentId);
        ComponentCommon common = topologyContext.getComponentCommon(thisComponentId);
        Assert.assertNull(topologyContext.getCodeDir());
        Assert.assertNull(common.get_json_conf());
        Assert.assertNull(topologyContext.getExecutorData(null));
        Assert.assertNull(topologyContext.getPIDDir());
        Assert.assertNull(topologyContext.getResource(null));
        Assert.assertNull(topologyContext.getSharedExecutor());
        Assert.assertNull(expectedContext.getTaskData(null));
        Assert.assertNull(topologyContext.getThisWorkerPort());
        Assert.assertTrue(expectedContext.getStormId().startsWith(topologyContext.getStormId()));
        Assert.assertEquals(expectedCommon.get_inputs(), common.get_inputs());
        Assert.assertEquals(expectedCommon.get_parallelism_hint(), common.get_parallelism_hint());
        Assert.assertEquals(expectedCommon.get_streams(), common.get_streams());
        Assert.assertEquals(expectedContext.getComponentIds(), topologyContext.getComponentIds());
        Assert.assertEquals(expectedContext.getComponentStreams(thisComponentId), topologyContext.getComponentStreams(thisComponentId));
        Assert.assertEquals(thisComponentId, topologyContext.getThisComponentId());
        Assert.assertEquals(expectedContext.getThisSources(), topologyContext.getThisSources());
        Assert.assertEquals(expectedContext.getThisStreams(), topologyContext.getThisStreams());
        Assert.assertEquals(expectedContext.getThisTargets(), topologyContext.getThisTargets());
        Assert.assertEquals(0, topologyContext.getThisWorkerTasks().size());
        for (int taskId : topologyContext.getComponentTasks(thisComponentId)) {
            Assert.assertEquals(thisComponentId, topologyContext.getComponentId(taskId));
        }
        for (String componentId : expectedContext.getComponentIds()) {
            Assert.assertEquals(expectedContext.getSources(componentId), topologyContext.getSources(componentId));
            Assert.assertEquals(expectedContext.getTargets(componentId), topologyContext.getTargets(componentId));
            for (String streamId : expectedContext.getComponentStreams(componentId)) {
                Assert.assertEquals(expectedContext.getComponentOutputFields(componentId, streamId).toList(), topologyContext.getComponentOutputFields(componentId, streamId).toList());
            }
        }
        for (String streamId : expectedContext.getThisStreams()) {
            Assert.assertEquals(expectedContext.getThisOutputFields(streamId).toList(), topologyContext.getThisOutputFields(streamId).toList());
        }
        HashMap<Integer, String> taskToComponents = new HashMap<Integer, String>();
        Set<Integer> allTaskIds = new HashSet<Integer>();
        for (String componentId : expectedContext.getComponentIds()) {
            List<Integer> possibleTasks = expectedContext.getComponentTasks(componentId);
            List<Integer> tasks = topologyContext.getComponentTasks(componentId);
            Iterator<Integer> p_it = possibleTasks.iterator();
            Iterator<Integer> t_it = tasks.iterator();
            while (p_it.hasNext()) {
                Assert.assertTrue(t_it.hasNext());
                Assert.assertNull(taskToComponents.put(p_it.next(), componentId));
                Assert.assertTrue(allTaskIds.add(t_it.next()));
            }
            Assert.assertFalse(t_it.hasNext());
        }
        Assert.assertEquals(taskToComponents, expectedContext.getTaskToComponent());
        Assert.assertTrue(taskIds.add(topologyContext.getThisTaskId()));
        try {
            topologyContext.getHooks();
            Assert.fail();
        } catch (UnsupportedOperationException e) {
        /* expected */
        }
        try {
            topologyContext.getRegisteredMetricByName(null);
            Assert.fail();
        } catch (UnsupportedOperationException e) {
        /* expected */
        }
    }
}
Also used : LocalCluster(org.apache.storm.LocalCluster) StreamingRuntimeContext(org.apache.flink.streaming.api.operators.StreamingRuntimeContext) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) IComponent(org.apache.storm.topology.IComponent) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) TestDummySpout(org.apache.flink.storm.util.TestDummySpout) TestSink(org.apache.flink.storm.util.TestSink) TopologyContext(org.apache.storm.task.TopologyContext) IRichBolt(org.apache.storm.topology.IRichBolt) ComponentCommon(org.apache.storm.generated.ComponentCommon) FlinkTopology(org.apache.flink.storm.api.FlinkTopology) TestDummyBolt(org.apache.flink.storm.util.TestDummyBolt) Test(org.junit.Test) AbstractTest(org.apache.flink.storm.util.AbstractTest)

Example 89 with StormTopology

use of org.apache.storm.generated.StormTopology in project storm by apache.

the class Zookeeper method leaderLatchListenerImpl.

// Leader latch listener that will be invoked when we either gain or lose leadership
public static LeaderLatchListener leaderLatchListenerImpl(final Map conf, final CuratorFramework zk, final BlobStore blobStore, final LeaderLatch leaderLatch) throws UnknownHostException {
    final String hostName = InetAddress.getLocalHost().getCanonicalHostName();
    return new LeaderLatchListener() {

        final String STORM_JAR_SUFFIX = "-stormjar.jar";

        final String STORM_CODE_SUFFIX = "-stormcode.ser";

        final String STORM_CONF_SUFFIX = "-stormconf.ser";

        @Override
        public void isLeader() {
            Set<String> activeTopologyIds = new TreeSet<>(Zookeeper.getChildren(zk, conf.get(Config.STORM_ZOOKEEPER_ROOT) + ClusterUtils.STORMS_SUBTREE, false));
            Set<String> activeTopologyBlobKeys = populateTopologyBlobKeys(activeTopologyIds);
            Set<String> activeTopologyCodeKeys = filterTopologyCodeKeys(activeTopologyBlobKeys);
            Set<String> allLocalBlobKeys = Sets.newHashSet(blobStore.listKeys());
            Set<String> allLocalTopologyBlobKeys = filterTopologyBlobKeys(allLocalBlobKeys);
            // finds blob keys of active topologies that are missing from the local blob store
            Sets.SetView<String> diffTopology = Sets.difference(activeTopologyBlobKeys, allLocalTopologyBlobKeys);
            LOG.info("active-topology-blobs [{}] local-topology-blobs [{}] diff-topology-blobs [{}]", generateJoinedString(activeTopologyIds), generateJoinedString(allLocalTopologyBlobKeys), generateJoinedString(diffTopology));
            if (diffTopology.isEmpty()) {
                Set<String> activeTopologyDependencies = getTopologyDependencyKeys(activeTopologyCodeKeys);
                // finds dependency blob keys of active topologies that are missing from the local blob store
                Sets.SetView<String> diffDependencies = Sets.difference(activeTopologyDependencies, allLocalBlobKeys);
                LOG.info("active-topology-dependencies [{}] local-blobs [{}] diff-topology-dependencies [{}]", generateJoinedString(activeTopologyDependencies), generateJoinedString(allLocalBlobKeys), generateJoinedString(diffDependencies));
                if (diffDependencies.isEmpty()) {
                    LOG.info("Accepting leadership, all active topologies and corresponding dependencies found locally.");
                } else {
                    LOG.info("Code for all active topologies is available locally, but some dependencies are not found locally, giving up leadership.");
                    closeLatch();
                }
            } else {
                LOG.info("code for all active topologies not available locally, giving up leadership.");
                closeLatch();
            }
        }

        @Override
        public void notLeader() {
            LOG.info("{} lost leadership.", hostName);
        }

        private String generateJoinedString(Set<String> activeTopologyIds) {
            return Joiner.on(",").join(activeTopologyIds);
        }

        private Set<String> populateTopologyBlobKeys(Set<String> activeTopologyIds) {
            Set<String> activeTopologyBlobKeys = new TreeSet<>();
            for (String activeTopologyId : activeTopologyIds) {
                activeTopologyBlobKeys.add(activeTopologyId + STORM_JAR_SUFFIX);
                activeTopologyBlobKeys.add(activeTopologyId + STORM_CODE_SUFFIX);
                activeTopologyBlobKeys.add(activeTopologyId + STORM_CONF_SUFFIX);
            }
            return activeTopologyBlobKeys;
        }

        private Set<String> filterTopologyBlobKeys(Set<String> blobKeys) {
            Set<String> topologyBlobKeys = new HashSet<>();
            for (String blobKey : blobKeys) {
                if (blobKey.endsWith(STORM_JAR_SUFFIX) || blobKey.endsWith(STORM_CODE_SUFFIX) || blobKey.endsWith(STORM_CONF_SUFFIX)) {
                    topologyBlobKeys.add(blobKey);
                }
            }
            return topologyBlobKeys;
        }

        private Set<String> filterTopologyCodeKeys(Set<String> blobKeys) {
            Set<String> topologyCodeKeys = new HashSet<>();
            for (String blobKey : blobKeys) {
                if (blobKey.endsWith(STORM_CODE_SUFFIX)) {
                    topologyCodeKeys.add(blobKey);
                }
            }
            return topologyCodeKeys;
        }

        private Set<String> getTopologyDependencyKeys(Set<String> activeTopologyCodeKeys) {
            Set<String> activeTopologyDependencies = new TreeSet<>();
            Subject subject = ReqContext.context().subject();
            for (String activeTopologyCodeKey : activeTopologyCodeKeys) {
                try {
                    InputStreamWithMeta blob = blobStore.getBlob(activeTopologyCodeKey, subject);
                    byte[] blobContent = IOUtils.readFully(blob, new Long(blob.getFileLength()).intValue());
                    StormTopology stormCode = Utils.deserialize(blobContent, StormTopology.class);
                    if (stormCode.is_set_dependency_jars()) {
                        activeTopologyDependencies.addAll(stormCode.get_dependency_jars());
                    }
                    if (stormCode.is_set_dependency_artifacts()) {
                        activeTopologyDependencies.addAll(stormCode.get_dependency_artifacts());
                    }
                } catch (AuthorizationException | KeyNotFoundException | IOException e) {
                    LOG.error("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
                    throw new RuntimeException("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
                }
            }
            return activeTopologyDependencies;
        }

        private void closeLatch() {
            try {
                leaderLatch.close();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Also used : AuthorizationException(org.apache.storm.generated.AuthorizationException) StormTopology(org.apache.storm.generated.StormTopology) IOException(java.io.IOException) Subject(javax.security.auth.Subject) InputStreamWithMeta(org.apache.storm.blobstore.InputStreamWithMeta) Sets(com.google.common.collect.Sets) LeaderLatchListener(org.apache.curator.framework.recipes.leader.LeaderLatchListener) KeyNotFoundException(org.apache.storm.generated.KeyNotFoundException)
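
A minimal wiring sketch for this factory, assuming Curator's standard LeaderLatch API; the latch path and the conf/zk/blobStore variables are placeholders taken from the method's parameters:

// Sketch only; leaderLatchListenerImpl can throw UnknownHostException and
// start() throws Exception, so the caller must handle or declare both.
LeaderLatch leaderLatch = new LeaderLatch(zk, "/storm/leader-lock");
leaderLatch.addListener(
    Zookeeper.leaderLatchListenerImpl(conf, zk, blobStore, leaderLatch));
leaderLatch.start(); // isLeader()/notLeader() fire as leadership changes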

Example 90 with StormTopology

use of org.apache.storm.generated.StormTopology in project ignite by apache.

the class StormIgniteStreamerSelfTest method testStormStreamerIgniteBolt.

/**
 * Tests the streamer bolt. Ignite is started inside the bolt, based on what is specified in the configuration file.
 *
 * @throws TimeoutException If the topology does not complete in time.
 * @throws InterruptedException If the test thread is interrupted.
 */
public void testStormStreamerIgniteBolt() throws TimeoutException, InterruptedException {
    final StormStreamer<String, String> stormStreamer = new StormStreamer<>();
    stormStreamer.setAutoFlushFrequency(10L);
    stormStreamer.setAllowOverwrite(true);
    stormStreamer.setCacheName(TEST_CACHE);
    stormStreamer.setIgniteTupleField(TestStormSpout.IGNITE_TUPLE_FIELD);
    stormStreamer.setIgniteConfigFile(GRID_CONF_FILE);
    Config daemonConf = new Config();
    daemonConf.put(Config.STORM_LOCAL_MODE_ZMQ, false);
    MkClusterParam mkClusterParam = new MkClusterParam();
    mkClusterParam.setDaemonConf(daemonConf);
    mkClusterParam.setSupervisors(4);
    final CountDownLatch latch = new CountDownLatch(TestStormSpout.CNT);
    IgniteBiPredicate<UUID, CacheEvent> putLsnr = new IgniteBiPredicate<UUID, CacheEvent>() {

        @Override
        public boolean apply(UUID uuid, CacheEvent evt) {
            assert evt != null;
            latch.countDown();
            return true;
        }
    };
    final UUID putLsnrId = ignite.events(ignite.cluster().forCacheNodes(TEST_CACHE)).remoteListen(putLsnr, null, EVT_CACHE_OBJECT_PUT);
    Testing.withSimulatedTimeLocalCluster(mkClusterParam, new TestJob() {

        @Override
        public void run(ILocalCluster cluster) throws IOException, InterruptedException {
            // Creates a test topology.
            TopologyBuilder builder = new TopologyBuilder();
            TestStormSpout testStormSpout = new TestStormSpout();
            builder.setSpout("test-spout", testStormSpout);
            builder.setBolt("ignite-bolt", stormStreamer, STORM_EXECUTORS).shuffleGrouping("test-spout");
            StormTopology topology = builder.createTopology();
            // Prepares a mock data for the spout.
            MockedSources mockedSources = new MockedSources();
            mockedSources.addMockData("test-spout", getMockData());
            // Prepares the config.
            Config conf = new Config();
            conf.setMessageTimeoutSecs(10);
            IgniteCache<Integer, String> cache = ignite.cache(TEST_CACHE);
            CompleteTopologyParam completeTopologyParam = new CompleteTopologyParam();
            completeTopologyParam.setTimeoutMs(10000);
            completeTopologyParam.setMockedSources(mockedSources);
            completeTopologyParam.setStormConf(conf);
            // Checks the cache doesn't contain any entries yet.
            assertEquals(0, cache.size(CachePeekMode.PRIMARY));
            Testing.completeTopology(cluster, topology, completeTopologyParam);
            // Checks all events are successfully processed within 10 seconds.
            assertTrue(latch.await(10, TimeUnit.SECONDS));
            ignite.events(ignite.cluster().forCacheNodes(TEST_CACHE)).stopRemoteListen(putLsnrId);
            // Validates all entries are in the cache.
            assertEquals(TestStormSpout.CNT, cache.size(CachePeekMode.PRIMARY));
            for (Map.Entry<Integer, String> entry : TestStormSpout.getKeyValMap().entrySet()) assertEquals(entry.getValue(), cache.get(entry.getKey()));
        }
    });
}
Also used : TestJob(org.apache.storm.testing.TestJob) IgniteBiPredicate(org.apache.ignite.lang.IgniteBiPredicate) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) IgniteCache(org.apache.ignite.IgniteCache) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) MkClusterParam(org.apache.storm.testing.MkClusterParam) ILocalCluster(org.apache.storm.ILocalCluster) MockedSources(org.apache.storm.testing.MockedSources) CompleteTopologyParam(org.apache.storm.testing.CompleteTopologyParam) CacheEvent(org.apache.ignite.events.CacheEvent) UUID(java.util.UUID)
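
The getMockData() helper is not shown above. A hypothetical stand-in, assuming MockedSources.addMockData(String, Values...) and that StormStreamer reads key/value pairs from the tuple field named TestStormSpout.IGNITE_TUPLE_FIELD; the tuple layout is illustrative only:

// Hypothetical mock data; the real test's tuple layout is not shown above and
// must match what StormStreamer expects in its configured Ignite tuple field.
private static Values[] getMockData() {
    Values[] data = new Values[TestStormSpout.CNT];
    int i = 0;
    for (Map.Entry<Integer, String> e : TestStormSpout.getKeyValMap().entrySet())
        data[i++] = new Values(Collections.singletonMap(e.getKey(), e.getValue()));
    return data;
}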

Aggregations

StormTopology (org.apache.storm.generated.StormTopology): 162
Config (org.apache.storm.Config): 72
HashMap (java.util.HashMap): 67
Test (org.junit.Test): 59
TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 44
Map (java.util.Map): 35
ArrayList (java.util.ArrayList): 29
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 27
Test (org.junit.jupiter.api.Test): 26
List (java.util.List): 24
Bolt (org.apache.storm.generated.Bolt): 23
Values (org.apache.storm.tuple.Values): 23
StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 22
Cluster (org.apache.storm.scheduler.Cluster): 22
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 22
Topologies (org.apache.storm.scheduler.Topologies): 22
Fields (org.apache.storm.tuple.Fields): 22
INimbus (org.apache.storm.scheduler.INimbus): 21
TopologyDef (org.apache.storm.flux.model.TopologyDef): 20
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 20