Use of com.ibm.streamsx.topology.TStream in project streamsx.kafka by IBMStreams: class KafkaOperatorsGreenThread, method kafkaGreenThread().
@Test
public void kafkaGreenThread() throws Exception {
    Topology topo = getTopology();

    // create the producer (produces tuples after a short delay)
    TStream<String> stringSrcStream = topo.strings(Constants.STRING_DATA).modify(new Delay<>(5000));
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> t.getString("message")));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, Constants.STRING_DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
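The getKafkaParams() helper used by this and the following tests is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming it supplies the same "topic" and "propertiesFile" operator parameters that kafkaStartPositionTest later passes explicitly (the TOPIC_TEST constant name is an assumption):

// Hypothetical sketch of the getKafkaParams() helper used by these tests.
// The "topic" and "propertiesFile" parameter names mirror the ones passed
// explicitly in kafkaStartPositionTest; Constants.TOPIC_TEST is an assumed name.
private Map<String, Object> getKafkaParams() {
    Map<String, Object> params = new HashMap<>();
    params.put("topic", Constants.TOPIC_TEST);
    params.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    return params;
}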
Use of com.ibm.streamsx.topology.TStream in project streamsx.kafka by IBMStreams: class KafkaOperatorsLongTypeTest, method kafkaLongTypeTest().
@Test
public void kafkaLongTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.LONG_SCHEMA;

    // create the producer (produces tuples after a short delay)
    TStream<Long> srcStream = topo.strings(DATA).transform(s -> Long.valueOf(s)).modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> String.valueOf(t.getLong("message"))));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
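The Converter passed to SPLStreams.convertStream is not shown in this excerpt. Since the consumer side reads the value with t.getLong("message"), LONG_SCHEMA presumably exposes a single int64 "message" attribute; under that assumption, a minimal sketch of the converter could look like:

// Hypothetical sketch of the Converter used above. It copies each Long value
// into the "message" attribute of the output tuple; the class body and the
// attribute handling are assumptions, not the project's actual code.
public static class Converter implements BiFunction<Long, OutputTuple, OutputTuple> {
    private static final long serialVersionUID = 1L;

    @Override
    public OutputTuple apply(Long value, OutputTuple outTuple) {
        outTuple.setLong("message", value);  // assumes LONG_SCHEMA has one int64 attribute named "message"
        return outTuple;
    }
}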
Use of com.ibm.streamsx.topology.TStream in project streamsx.kafka by IBMStreams: class KafkaOperatorsMultipleTopics, method kafkaMultipleTopicsTest().
@Test
public void kafkaMultipleTopicsTest() throws Exception {
    Topology topo = getTopology();

    // create the producer (produces tuples after a short delay)
    TStream<String> stringSrcStream = topo.strings(Constants.STRING_DATA).modify(new Delay<>(Constants.PRODUCER_DELAY));
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> t.getString("message")));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 3);
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, expectedArr);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
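Because the producer writes to (and the consumer subscribes to) multiple topics, every input string is expected to arrive more than once, which is why the expected array is built with duplicateArrayEntries. The helper itself is not shown; a minimal sketch, assuming it simply repeats the whole input array the given number of times, could be:

// Hypothetical sketch of KafkaSPLStreamsUtils.duplicateArrayEntries. The
// implementation is an assumption; the intent (each message appears once per
// topic, three topics here) follows from how the expected results are built.
public static String[] duplicateArrayEntries(String[] data, int times) {
    String[] result = new String[data.length * times];
    for (int i = 0; i < times; i++) {
        System.arraycopy(data, 0, result, i * data.length, data.length);
    }
    return result;
}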
Use of com.ibm.streamsx.topology.TStream in project streamsx.kafka by IBMStreams: class KafkaOperatorsStartPositionTest, method kafkaStartPositionTest().
@Test
public void kafkaStartPositionTest() throws Exception {
    Topology producerTopo = createTopology("producerTopo");

    // create the producer (produces tuples after a short delay)
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.put("topic", Constants.TOPIC_POS);
    producerProps.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    TStream<String> stringSrcStream = producerTopo.strings(Constants.STRING_DATA);
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), producerProps);

    // Launch the producer and allow it to finish writing
    // the data to the queue. Consumer should run AFTER
    // producer is finished.
    @SuppressWarnings("unchecked")
    Future<BigInteger> future = (Future<BigInteger>) StreamsContextFactory.getStreamsContext(Type.STANDALONE).submit(producerTopo);
    future.get();
    Thread.sleep(TimeUnit.SECONDS.toMillis(20));
    if (!future.isDone()) {
        future.cancel(true);
    }

    // create the consumer
    Topology topo = getTopology();
    Map<String, Object> consumerParams = new HashMap<>();
    consumerParams.put("topic", Constants.TOPIC_POS);
    consumerParams.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    consumerParams.put("startPosition", StartPosition.Beginning);
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, consumerParams, KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> t.getString("message")));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, Constants.STRING_DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
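Unlike the other tests, the producer here runs in its own topology built by a createTopology(String) helper, so it can be submitted standalone and finish before the consumer is launched; startPosition = StartPosition.Beginning then lets the consumer pick up records that were written before it started. The helper is not shown; a minimal sketch, assuming it mainly wraps the Topology constructor, could be:

// Hypothetical sketch of the createTopology helper used for the standalone
// producer. Any additional toolkit or file-dependency setup performed by the
// real helper is omitted here and would need to be added.
private Topology createTopology(String name) throws Exception {
    return new Topology(name);
}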
Use of com.ibm.streamsx.topology.TStream in project streamsx.kafka by IBMStreams: class KafkaOperatorsTopicPartitionTest, method kafkaTopicPartitionTest().
@Test
public void kafkaTopicPartitionTest() throws Exception {
    Topology topo = getTopology();
    topo.addFileDependency("etc/custom_partitioner.properties", "etc");
    topo.addFileDependency("etc/custompartitioner.jar", "etc");

    // create producer
    TStream<Message<Integer, String>> src = topo.limitedSource(new MySupplier(), 9).modify(new Delay<>(Constants.PRODUCER_DELAY));
    SPLStream outStream = SPLStreams.convertStream(src, new MessageConverter(), SCHEMA);
    SPL.invokeSink(Constants.KafkaProducerOp, outStream, getKafkaProducerParams());

    // create the consumers
    SPLStream msgStream1 = createConsumer(topo, 0);
    SPLStream msgStream2 = createConsumer(topo, 1);
    SPLStream msgStream3 = createConsumer(topo, 2);
    Set<TStream<Tuple>> s = new HashSet<>();
    s.add(msgStream2);
    s.add(msgStream3);
    TStream<String> unionStream = msgStream1.union(s).transform(t -> t.getString("message"));
    SPLStream msgStream = SPLStreams.stringToSPLStream(unionStream);

    // test the output of the consumers
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    String[] expectedArr = { "A0", "B1", "C2", "A3", "B4", "C5", "A6", "B7", "C8" };
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, expectedArr);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
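MySupplier and MessageConverter are defined elsewhere in the test project. Given the expected results "A0" through "C8" and the three per-partition consumers, a minimal sketch of the supplier, assuming Message<K, V> is a simple key/value holder class in the test code, could look like:

// Hypothetical sketch of the MySupplier used with topo.limitedSource above.
// The expected strings suggest the values cycle through A, B, C with an
// increasing counter; the Message<K, V> constructor and key scheme are assumptions.
public static class MySupplier implements Supplier<Message<Integer, String>> {
    private static final long serialVersionUID = 1L;
    private final String[] letters = { "A", "B", "C" };
    private int counter = 0;

    @Override
    public Message<Integer, String> get() {
        int i = counter++;
        // key i % 3 is what the custom partitioner would use to route each
        // message to one of the three partitions; value is e.g. "A0", "B1", "C2"
        return new Message<>(i % 3, letters[i % 3] + i);
    }
}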