Use of org.apache.storm.task.OutputCollector in project storm by apache.
In class TestHiveBolt, the method testNoAcksUntilFlushed:
@Test
public void testNoAcksUntilFlushed() {
    JsonRecordHiveMapper mapper = new JsonRecordHiveMapper()
        .withColumnFields(new Fields(colNames1))
        .withPartitionFields(new Fields(partNames));
    HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
        .withTxnsPerBatch(2)
        .withBatchSize(2);
    bolt = new TestingHiveBolt(hiveOptions);
    bolt.prepare(config, null, new OutputCollector(collector));
    Tuple tuple1 = generateTestTuple(1, "SJC", "Sunnyvale", "CA");
    Tuple tuple2 = generateTestTuple(2, "SFO", "San Jose", "CA");
    // With a batch size of 2, the first tuple alone must not trigger a flush or an ack.
    bolt.execute(tuple1);
    verifyZeroInteractions(collector);
    // The second tuple fills the batch, forcing a flush, after which both tuples are acked.
    bolt.execute(tuple2);
    verify(collector).ack(tuple1);
    verify(collector).ack(tuple2);
    bolt.cleanup();
}
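The generateTestTuple(...) helper is not included in this excerpt. A minimal hypothetical sketch of such a helper, assuming the mapper reads each column by field name and assuming the field names "id", "msg", "city", "state" (with org.apache.storm.tuple.Tuple/Fields/Values and org.mockito.Mockito imported); the real helper may build a TupleImpl instead:

private Tuple generateTestTuple(Object id, Object msg, Object city, Object state) {
    Fields fields = new Fields("id", "msg", "city", "state"); // assumed field names
    Values values = new Values(id, msg, city, state);
    Tuple tuple = Mockito.mock(Tuple.class);
    Mockito.when(tuple.getFields()).thenReturn(fields);
    // Stub field-based lookups so a mapper can read each column by name.
    for (int i = 0; i < fields.size(); i++) {
        Mockito.when(tuple.getValueByField(fields.get(i))).thenReturn(values.get(i));
    }
    return tuple;
}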
Use of org.apache.storm.task.OutputCollector in project storm by apache.
In class TestHiveBolt, the method testNoTickEmptyBatches:
@Test
public void testNoTickEmptyBatches() throws Exception {
    JsonRecordHiveMapper mapper = new JsonRecordHiveMapper()
        .withColumnFields(new Fields(colNames1))
        .withPartitionFields(new Fields(partNames));
    HiveOptions hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
        .withTxnsPerBatch(2)
        .withBatchSize(2);
    bolt = new TestingHiveBolt(hiveOptions);
    bolt.prepare(config, null, new OutputCollector(collector));
    // Since the batch is empty, the tick should NOT cause any acks (beyond the tick acking itself).
    Tuple mockTick = MockTupleHelpers.mockTickTuple();
    bolt.execute(mockTick);
    verifyZeroInteractions(collector);
    bolt.cleanup();
}
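A tick tuple is identified purely by its source component and stream, so MockTupleHelpers.mockTickTuple() can plausibly be sketched as a Mockito mock stubbed with Storm's system identifiers (a sketch, not necessarily the project's exact helper):

public static Tuple mockTickTuple() {
    Tuple tuple = Mockito.mock(Tuple.class);
    // TupleUtils.isTick(...) checks exactly these two properties.
    Mockito.when(tuple.getSourceComponent()).thenReturn(Constants.SYSTEM_COMPONENT_ID);
    Mockito.when(tuple.getSourceStreamId()).thenReturn(Constants.SYSTEM_TICK_STREAM_ID);
    return tuple;
}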
Use of org.apache.storm.task.OutputCollector in project storm by apache.
In class StreamBuilderTest, the method testMultiPartitionByKeyWithRepartition:
@Test
public void testMultiPartitionByKeyWithRepartition() {
    TopologyContext mockContext = Mockito.mock(TopologyContext.class);
    OutputCollector mockCollector = Mockito.mock(OutputCollector.class);
    Map<GlobalStreamId, Grouping> expected = new HashMap<>();
    expected.put(new GlobalStreamId("bolt2", "s3"), Grouping.fields(Collections.singletonList("key")));
    expected.put(new GlobalStreamId("bolt2", "s3__punctuation"), Grouping.all(new NullStruct()));
    Stream<Integer> stream = streamBuilder.newStream(newSpout(Utils.DEFAULT_STREAM_ID), new ValueMapper<>(0));
    stream.mapToPair(x -> Pair.of(x, x))
          .window(TumblingWindows.of(BaseWindowedBolt.Count.of(10)))
          .reduceByKey((x, y) -> x + y)
          .repartition(10)
          .reduceByKey((x, y) -> 0)
          .print();
    StormTopology topology = streamBuilder.build();
    // The repartition introduces a shuffle boundary, so the plan compiles to three bolts;
    // bolt3 subscribes to bolt2's key-grouped data stream plus its all-grouped punctuation stream.
    assertEquals(3, topology.get_bolts_size());
    assertEquals(expected, topology.get_bolts().get("bolt3").get_common().get_inputs());
}
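For intuition, the two expected inputs are the same subscriptions one would declare by hand with TopologyBuilder; a hypothetical equivalent (SomeWindowedReducerBolt is a placeholder name standing in for the generated bolt3, not part of this test):

TopologyBuilder builder = new TopologyBuilder();
builder.setBolt("bolt3", new SomeWindowedReducerBolt(), 10)
       // Data stream "s3": values with the same key always reach the same task.
       .fieldsGrouping("bolt2", "s3", new Fields("key"))
       // Punctuation stream: window-boundary markers are broadcast to every task.
       .allGrouping("bolt2", "s3__punctuation");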
Use of org.apache.storm.task.OutputCollector in project storm by apache.
In class StreamBuilderTest, the method testBranchAndJoin:
@Test
public void testBranchAndJoin() throws Exception {
    TopologyContext mockContext = Mockito.mock(TopologyContext.class);
    OutputCollector mockCollector = Mockito.mock(OutputCollector.class);
    Stream<Integer> stream = streamBuilder.newStream(newSpout(Utils.DEFAULT_STREAM_ID), new ValueMapper<>(0), 2);
    // Split the stream into even and odd branches, then join them back on the key.
    Stream<Integer>[] streams = stream.branch(x -> x % 2 == 0, x -> x % 2 == 1);
    PairStream<Integer, Pair<Integer, Integer>> joined =
        streams[0].mapToPair(x -> Pair.of(x, 1)).join(streams[1].mapToPair(x -> Pair.of(x, 1)));
    assertTrue(joined.getNode() instanceof ProcessorNode);
    StormTopology topology = streamBuilder.build();
    assertEquals(2, topology.get_bolts_size());
}
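branch(...) evaluates its predicates in order and routes each value to the stream of the first predicate that matches; values matching none of the predicates are dropped. A small usage sketch, assuming numbers is a Stream<Integer> built from a spout as above:

Stream<Integer>[] branches = numbers.branch(
    x -> x % 2 == 0,   // branches[0] receives even values
    x -> x % 2 == 1);  // branches[1] receives odd values
branches[0].print();   // prints only even values
branches[1].print();   // prints only odd values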
Use of org.apache.storm.task.OutputCollector in project storm by apache.
In class TridentBoltExecutor, the method prepare:
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    _messageTimeoutMs = context.maxTopologyMessageTimeout() * 1000L;
    _lastRotate = System.currentTimeMillis();
    _batches = new RotatingMap<>(2);
    _context = context;
    _collector = collector;
    _coordCollector = new CoordinatedOutputCollector(collector);
    _coordOutputCollector = new BatchOutputCollectorImpl(new OutputCollector(_coordCollector));
    _coordConditions = (Map) context.getExecutorData("__coordConditions");
    if (_coordConditions == null) {
        _coordConditions = new HashMap<>();
        for (String batchGroup : _coordSpecs.keySet()) {
            CoordSpec spec = _coordSpecs.get(batchGroup);
            CoordCondition cond = new CoordCondition();
            cond.commitStream = spec.commitStream;
            cond.expectedTaskReports = 0;
            for (String comp : spec.coords.keySet()) {
                CoordType ct = spec.coords.get(comp);
                if (ct.equals(CoordType.single())) {
                    // A "single" coordinator contributes exactly one task report per batch.
                    cond.expectedTaskReports += 1;
                } else {
                    // Otherwise, every task of the upstream component must report.
                    cond.expectedTaskReports += context.getComponentTasks(comp).size();
                }
            }
            cond.targetTasks = new HashSet<>();
            for (String component : Utils.get(context.getThisTargets(), COORD_STREAM(batchGroup), new HashMap<String, Grouping>()).keySet()) {
                cond.targetTasks.addAll(context.getComponentTasks(component));
            }
            _coordConditions.put(batchGroup, cond);
        }
        // Store under the same key that is read above so the conditions are shared.
        context.setExecutorData("__coordConditions", _coordConditions);
    }
    _bolt.prepare(conf, context, _coordOutputCollector);
}
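CoordinatedOutputCollector is the wrapper that makes this coordination possible: it intercepts emits and counts the tuples sent to each downstream task, so a batch can be declared finished only when the reported counts line up. A simplified, hypothetical sketch of that counting idea (not the actual Storm class; left abstract because the remaining IOutputCollector methods would simply delegate):

import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.task.IOutputCollector;
import org.apache.storm.tuple.Tuple;

abstract class CountingCollectorSketch implements IOutputCollector {
    private final IOutputCollector delegate;
    private final Map<Integer, Integer> emittedPerTask = new HashMap<>();

    CountingCollectorSketch(IOutputCollector delegate) {
        this.delegate = delegate;
    }

    @Override
    public List<Integer> emit(String streamId, Collection<Tuple> anchors, List<Object> tuple) {
        List<Integer> targetTasks = delegate.emit(streamId, anchors, tuple);
        // Record how many tuples each downstream task has received.
        for (Integer task : targetTasks) {
            emittedPerTask.merge(task, 1, Integer::sum);
        }
        return targetTasks;
    }
}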