Use of com.ibm.streamsx.kafka.test.utils.Message in project streamsx.kafka by IBMStreams.
From the class KafkaOperatorsTopicPartitionTest, the method kafkaTopicPartitionTest:
@Test
public void kafkaTopicPartitionTest() throws Exception {
    Topology topo = getTopology();
    topo.addFileDependency("etc/custom_partitioner.properties", "etc");
    topo.addFileDependency("etc/custompartitioner.jar", "etc");

    // create the producer
    TStream<Message<Integer, String>> src = topo.limitedSource(new MySupplier(), 9)
            .modify(new Delay<>(Constants.PRODUCER_DELAY));
    SPLStream outStream = SPLStreams.convertStream(src, new MessageConverter(), SCHEMA);
    SPL.invokeSink(Constants.KafkaProducerOp, outStream, getKafkaProducerParams());

    // create one consumer per partition
    SPLStream msgStream1 = createConsumer(topo, 0);
    SPLStream msgStream2 = createConsumer(topo, 1);
    SPLStream msgStream3 = createConsumer(topo, 2);

    // union the three consumer streams and extract the message text
    Set<TStream<Tuple>> s = new HashSet<>();
    s.add(msgStream2);
    s.add(msgStream3);
    TStream<String> unionStream = msgStream1.union(s).transform(t -> t.getString("message"));
    SPLStream msgStream = SPLStreams.stringToSPLStream(unionStream);

    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    String[] expectedArr = { "A0", "B1", "C2", "A3", "B4", "C5", "A6", "B7", "C8" };
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, expectedArr);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
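Both tests rely on helpers that the snippet does not show. For orientation, here is a minimal sketch of what the MySupplier source referenced above could look like. The class name and the Message type come from the snippet, but the body and the (key, value) constructor order are assumptions; the real implementation in the test class may differ. To satisfy the test, it must emit the nine values "A0" through "C8" while cycling the key over the three partitions.

    import com.ibm.streamsx.kafka.test.utils.Message;
    import com.ibm.streamsx.topology.function.Supplier;

    // Hypothetical sketch of MySupplier -- not the project's actual code.
    // Emits Message<Integer, String> tuples whose values match the expected
    // results "A0", "B1", "C2", ..., "C8" checked by the test.
    class MySupplier implements Supplier<Message<Integer, String>> {
        private static final long serialVersionUID = 1L;
        private static final String[] LETTERS = { "A", "B", "C" };
        private int counter = 0;

        @Override
        public Message<Integer, String> get() {
            int i = counter++;
            // Assumption: the key cycles 0,1,2 so the custom partitioner can
            // spread the messages across the three topic partitions.
            return new Message<>(i % 3, LETTERS[i % 3] + i);
        }
    }

Because the supplier is invoked through topo.limitedSource(new MySupplier(), 9), exactly nine messages are produced, which is why the expected array in both tests has nine entries.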
Use of com.ibm.streamsx.kafka.test.utils.Message in project streamsx.kafka by IBMStreams.
From the class KafkaProducerPartitionAttrTest, the method kafkaProducerPartitionAttrTest:
@Test
public void kafkaProducerPartitionAttrTest() throws Exception {
    Topology topo = getTopology();
    topo.addFileDependency("etc/custom_partitioner.properties", "etc");
    topo.addFileDependency("etc/custompartitioner.jar", "etc");

    // create the producer
    TStream<Message<Integer, String>> src = topo.limitedSource(new MySupplier(), 9)
            .modify(new Delay<>(Constants.PRODUCER_DELAY));
    SPLStream outStream = SPLStreams.convertStream(src, new MessageConverter(), PRODUCER_SCHEMA);
    SPL.invokeSink(Constants.KafkaProducerOp, outStream, getKafkaProducerParams());

    // create a single consumer reading only PARTITION_NUM
    SPLStream msgStream1 = createConsumer(topo, PARTITION_NUM);
    TStream<String> unionStream = msgStream1.transform(t -> t.getString("message"));
    SPLStream msgStream = SPLStreams.stringToSPLStream(unionStream);

    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    String[] expectedArr = { "A0", "B1", "C2", "A3", "B4", "C5", "A6", "B7", "C8" };
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, expectedArr);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
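The createConsumer and getKafkaProducerParams helpers are likewise defined elsewhere in the test classes. A rough sketch of how they might invoke the SPL Kafka operators follows; the operator parameter names and the Constants fields used here are assumptions for illustration, not the project's confirmed values.

    import java.util.HashMap;
    import java.util.Map;
    import com.ibm.streamsx.topology.Topology;
    import com.ibm.streamsx.topology.spl.SPL;
    import com.ibm.streamsx.topology.spl.SPLStream;

    // Hypothetical sketches -- the real helpers live in the test classes.

    // Invoke the SPL KafkaConsumer operator reading a single partition.
    // The parameter names and constants are assumptions.
    private SPLStream createConsumer(Topology topo, int partition) {
        Map<String, Object> params = new HashMap<>();
        params.put("propertiesFile", Constants.PROPERTIES_FILE_PATH); // assumed constant
        params.put("topic", Constants.TOPIC);                         // assumed constant
        params.put("partition", partition);
        return SPL.invokeSource(topo, Constants.KafkaConsumerOp, params, SCHEMA);
    }

    // Parameters passed to the KafkaProducer sink in both tests (assumed).
    private Map<String, Object> getKafkaProducerParams() {
        Map<String, Object> params = new HashMap<>();
        params.put("propertiesFile", Constants.PROPERTIES_FILE_PATH); // assumed constant
        params.put("topic", Constants.TOPIC);                         // assumed constant
        return params;
    }

Under these assumptions, the second test differs from the first only in consuming a single partition: the producer routes every message via the partition attribute, so all nine expected values arrive on PARTITION_NUM rather than spread across three consumers.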