use of org.apache.storm.testing.FeederSpout in project storm by apache.
the class TopologyIntegrationTest method testResetTimeout.
@Test
public void testResetTimeout() throws Exception {
    try (LocalCluster cluster = new LocalCluster.Builder()
            .withSimulatedTime()
            .withDaemonConf(Collections.singletonMap(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true))
            .build()) {
        FeederSpout feeder = new FeederSpout(new Fields("field1"));
        AckFailMapTracker tracker = new AckFailMapTracker();
        feeder.setAckFailDelegate(tracker);
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("1", feeder);
        builder.setBolt("2", new ResetTimeoutBolt()).globalGrouping("1");
        StormTopology topology = builder.createTopology();
        cluster.submitTopology("reset-timeout-tester", Collections.singletonMap(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 10), topology);
        // The first tuple will be used to check the timeout reset
        feeder.feed(new Values("a"), 1);
        // The second tuple is used to wait for the spout to rotate its pending map
        feeder.feed(new Values("b"), 2);
        cluster.advanceClusterTime(9);
        // The remaining tuples are used to reset the first tuple's timeout,
        // and to wait for the message to get through to the spout (acks use the same path as timeout resets)
        feeder.feed(new Values("c"), 3);
        assertAcked(tracker, 3);
        cluster.advanceClusterTime(9);
        feeder.feed(new Values("d"), 4);
        assertAcked(tracker, 4);
        cluster.advanceClusterTime(2);
        // The time is now twice the message timeout; the second tuple should expire since it was not acked.
        // Waiting for this also ensures that the first tuple gets failed if the timeout reset doesn't work.
        assertFailed(tracker, 2);
        // Feed another tuple to cause the first tuple to be acked
        feeder.feed(new Values("e"), 5);
        assertAcked(tracker, 5);
        // The first tuple should be acked, and should not have failed
        assertThat(tracker.isFailed(1), is(false));
        assertAcked(tracker, 1);
    }
}
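The ResetTimeoutBolt referenced above is a test helper whose source is not shown here. A minimal, hypothetical sketch of such a bolt follows, assuming it holds on to the first tuple, deliberately never acks the second, resets the first tuple's timeout via OutputCollector.resetTimeout whenever later tuples arrive, and acks the held tuple on the final feed; the real helper in the Storm test suite may be implemented differently.
// Hypothetical sketch of a ResetTimeoutBolt-style helper; not the actual class from the Storm tests.
public static class ResetTimeoutBolt extends BaseRichBolt {
    private OutputCollector collector;
    private Tuple firstTuple;
    private int seen = 0;

    @Override
    public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        seen++;
        if (seen == 1) {
            // Hold the first tuple so the test can exercise timeout resets on it.
            firstTuple = tuple;
        } else if (seen == 2) {
            // Never ack the second tuple; the test expects it to fail after the timeout.
        } else {
            // Later tuples reset the held tuple's timeout and are acked themselves.
            collector.resetTimeout(firstTuple);
            collector.ack(tuple);
            if (seen == 5) {
                // The last feed finally acks the first tuple.
                collector.ack(firstTuple);
            }
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output fields
    }
}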
use of org.apache.storm.testing.FeederSpout in project storm by apache.
the class JoinBoltExample method main.
public static void main(String[] args) throws Exception {
    if (!NimbusClient.isLocalOverride()) {
        throw new IllegalStateException("This example only works in local mode. Run with storm local not storm jar");
    }
    FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender"));
    FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("genderSpout", genderSpout);
    builder.setSpout("ageSpout", ageSpout);
    // inner join of 'age' and 'gender' records on the 'id' field
    JoinBolt joiner = new JoinBolt("genderSpout", "id")
        .join("ageSpout", "id", "genderSpout")
        .select("genderSpout:id,ageSpout:id,gender,age")
        .withTumblingWindow(new BaseWindowedBolt.Duration(10, TimeUnit.SECONDS));
    builder.setBolt("joiner", joiner)
        .fieldsGrouping("genderSpout", new Fields("id"))
        .fieldsGrouping("ageSpout", new Fields("id"));
    builder.setBolt("printer", new PrinterBolt()).shuffleGrouping("joiner");
    Config conf = new Config();
    StormSubmitter.submitTopologyWithProgressBar("join-example", conf, builder.createTopology());
    generateGenderData(genderSpout);
    generateAgeData(ageSpout);
}
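generateGenderData and generateAgeData are helper methods of the example that are not included in the snippet. A plausible minimal version, feeding a handful of illustrative records into the two FeederSpouts (the values used by the real example may differ), could look like this:
// Hypothetical data generators; the field values here are illustrative only.
private static void generateGenderData(FeederSpout genderSpout) {
    for (int i = 0; i < 10; i++) {
        genderSpout.feed(new Values(i, (i % 2 == 0) ? "male" : "female"));
    }
}

private static void generateAgeData(FeederSpout ageSpout) {
    for (int i = 0; i < 10; i++) {
        ageSpout.feed(new Values(i, 20 + i));
    }
}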
use of org.apache.storm.testing.FeederSpout in project flink by apache.
the class SingleJoinExample method main.
public static void main(String[] args) throws Exception {
    final FeederSpout genderSpout = new FeederSpout(new Fields("id", "gender", "hobbies"));
    final FeederSpout ageSpout = new FeederSpout(new Fields("id", "age"));
    Config conf = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    // only required to stabilize the integration test
    conf.put(FlinkLocalCluster.SUBMIT_BLOCKING, true);
    final NullTerminatingSpout finalGenderSpout = new NullTerminatingSpout(genderSpout);
    final NullTerminatingSpout finalAgeSpout = new NullTerminatingSpout(ageSpout);
    builder.setSpout("gender", finalGenderSpout);
    builder.setSpout("age", finalAgeSpout);
    builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
        .fieldsGrouping("gender", new Fields("id"))
        .fieldsGrouping("age", new Fields("id"));
    // emit the join result
    if (args.length > 0) {
        // write the joined tuples to the given output file
        builder.setBolt("fileOutput", new BoltFileSink(args[0], new TupleOutputFormatter())).shuffleGrouping("join");
    } else {
        builder.setBolt("print", new PrinterBolt()).shuffleGrouping("join");
    }
    String[] hobbies = new String[] { "reading", "biking", "travelling", "watching tv" };
    for (int i = 0; i < 10; i++) {
        String gender;
        if (i % 2 == 0) {
            gender = "male";
        } else {
            gender = "female";
        }
        genderSpout.feed(new Values(i, gender, hobbies[i % hobbies.length]));
    }
    for (int i = 9; i >= 0; i--) {
        ageSpout.feed(new Values(i, i + 20));
    }
    final FlinkLocalCluster cluster = FlinkLocalCluster.getLocalCluster();
    cluster.submitTopology("joinTopology", conf, FlinkTopology.createTopology(builder));
    cluster.shutdown();
}
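PrinterBolt, used as the sink in both join examples above, simply prints each tuple it receives. A minimal sketch of such a bolt, assuming the standard BaseBasicBolt API (the utility bolt bundled with the examples may log instead of printing), is:
// Hypothetical minimal PrinterBolt; BaseBasicBolt acks each tuple automatically after execute returns.
public static class PrinterBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        System.out.println(tuple);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // this bolt emits nothing downstream
    }
}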
use of org.apache.storm.testing.FeederSpout in project storm by apache.
the class TickTupleTest method testTickTupleWorksWithSystemBolt.
@Test
public void testTickTupleWorksWithSystemBolt() throws Exception {
    try (ILocalCluster cluster = new LocalCluster.Builder().withSimulatedTime().build()) {
        TopologyBuilder builder = new TopologyBuilder();
        FeederSpout feeder = new FeederSpout(new Fields("field1"));
        AckFailMapTracker tracker = new AckFailMapTracker();
        feeder.setAckFailDelegate(tracker);
        builder.setSpout("Spout", feeder);
        builder.setBolt("Bolt", new NoopBolt()).shuffleGrouping("Spout");
        Config topoConf = new Config();
        topoConf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, TICK_INTERVAL_SECS);
        try (ILocalTopology topo = cluster.submitTopology("test", topoConf, builder.createTopology())) {
            // Use a bootstrap tuple to wait for the topology to be running
            feeder.feed(new Values("val"), 1);
            AssertLoop.assertAcked(tracker, 1);
            /*
             * Verify that some ticks are received. The interval between ticks is validated by the bolt.
             * If too few arrive, the checks below time out; if too many arrive, the bolt may crash
             * (not deterministically, but the test would become flaky).
             */
            try {
                cluster.advanceClusterTime(TICK_INTERVAL_SECS);
                waitForTicks(1);
                cluster.advanceClusterTime(TICK_INTERVAL_SECS);
                waitForTicks(2);
                cluster.advanceClusterTime(TICK_INTERVAL_SECS);
                waitForTicks(3);
            } catch (ConditionTimeoutException e) {
                throw new AssertionError(e.getMessage());
            }
            assertNull("The bolt got a tuple that is not a tick tuple: " + nonTickTuple.get(), nonTickTuple.get());
        }
    }
}
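waitForTicks and the tick counter it polls belong to the test class and are not shown. Since the test catches ConditionTimeoutException, it presumably polls with Awaitility; a sketch under that assumption (the counter name and timeout are made up here) is:
// Hypothetical helper; assumes NoopBolt increments this counter for every tick tuple it receives.
private static final AtomicInteger tickCount = new AtomicInteger();

private static void waitForTicks(int expectedTicks) {
    Awaitility.await()
        .atMost(java.time.Duration.ofSeconds(30))
        .until(() -> tickCount.get() >= expectedTicks);
}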
use of org.apache.storm.testing.FeederSpout in project storm by apache.
the class TestingTest method testAdvanceClusterTime.
@Test
public void testAdvanceClusterTime() throws Exception {
    Config daemonConf = new Config();
    daemonConf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true);
    try (LocalCluster cluster = new LocalCluster.Builder().withDaemonConf(daemonConf).withSimulatedTime().build()) {
        FeederSpout feeder = new FeederSpout(new Fields("field1"));
        AckFailMapTracker tracker = new AckFailMapTracker();
        feeder.setAckFailDelegate(tracker);
        Map<String, Thrift.SpoutDetails> spoutMap = new HashMap<>();
        spoutMap.put("1", Thrift.prepareSpoutDetails(feeder));
        Map<String, Thrift.BoltDetails> boltMap = new HashMap<>();
        boltMap.put("2", Thrift.prepareBoltDetails(
            Collections.singletonMap(Utils.getGlobalStreamId("1", null), Thrift.prepareShuffleGrouping()),
            new AckEveryOtherBolt()));
        StormTopology topology = Thrift.buildTopology(spoutMap, boltMap);
        Config stormConf = new Config();
        stormConf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 10);
        cluster.submitTopology("timeout-tester", stormConf, topology);
        feeder.feed(new Values("a"), 1);
        feeder.feed(new Values("b"), 2);
        feeder.feed(new Values("c"), 3);
        cluster.advanceClusterTime(9);
        assertAcked(tracker, 1, 3);
        assertThat(tracker.isFailed(2), is(false));
        cluster.advanceClusterTime(12);
        assertFailed(tracker, 2);
    }
}
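AckEveryOtherBolt is another test helper that is not shown. From the assertions (tuples 1 and 3 are acked, tuple 2 is left to time out) it presumably acks every other tuple it receives; a minimal sketch under that assumption follows.
// Hypothetical sketch; the real AckEveryOtherBolt in the Storm tests may differ in detail.
public static class AckEveryOtherBolt extends BaseRichBolt {
    private OutputCollector collector;
    private boolean ackNext = true;

    @Override
    public void prepare(Map<String, Object> conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        if (ackNext) {
            collector.ack(tuple);
        }
        // Every other tuple is left un-acked so it eventually times out.
        ackNext = !ackNext;
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output fields
    }
}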