Use of com.ibm.streamsx.topology.context.StreamsContext in project streamsx.kafka by IBMStreams.
From class KafkaOperatorsStartPositionTest, method kafkaStartPositionTest:
@Test
public void kafkaStartPositionTest() throws Exception {
    Topology producerTopo = createTopology("producerTopo");

    // create the producer (produces tuples after a short delay)
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.put("topic", Constants.TOPIC_POS);
    producerProps.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    TStream<String> stringSrcStream = producerTopo.strings(Constants.STRING_DATA);
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), producerProps);

    // Launch the producer and let it finish writing the data to the
    // topic. The consumer must run AFTER the producer has finished.
    @SuppressWarnings("unchecked")
    Future<BigInteger> future = (Future<BigInteger>) StreamsContextFactory.getStreamsContext(Type.STANDALONE).submit(producerTopo);
    future.get(); // blocks until the standalone run completes
    Thread.sleep(3000L); // short grace period before starting the consumer
    if (!future.isDone()) {
        future.cancel(true); // safety net; normally a no-op after get()
    }

    // create the consumer
    Topology topo = getTopology();
    topo.addJobControlPlane();
    Map<String, Object> consumerParams = new HashMap<>();
    consumerParams.put("topic", Constants.TOPIC_POS);
    consumerParams.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    consumerParams.put("startPosition", StartPosition.Beginning);
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, consumerParams, KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> t.getString("message")));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered(msgStream.toStringStream(), Constants.STRING_DATA);
    HashMap<String, Object> config = new HashMap<>();
    // config.put(ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
    // config.put(ContextProperties.KEEP_ARTIFACTS, Boolean.TRUE);
    tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(stringContentsUnordered.valid());
    Assert.assertEquals(Constants.STRING_DATA.length, stringContentsUnordered.getResult().size());
}
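The pattern worth noting here is the blocking STANDALONE run: submit() returns a Future whose get() does not return until the standalone job has exited, which is what guarantees the producer finishes before the consumer starts. A minimal helper capturing this pattern might look as follows; the helper name and the explicit timeout are illustrative additions, not part of the project:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.ibm.streamsx.topology.Topology;
import com.ibm.streamsx.topology.context.StreamsContext.Type;
import com.ibm.streamsx.topology.context.StreamsContextFactory;

// Hypothetical helper: run a topology standalone and block until it completes.
static void runStandaloneToCompletion(Topology topo, long timeoutSeconds) throws Exception {
    Future<?> future = StreamsContextFactory.getStreamsContext(Type.STANDALONE).submit(topo);
    try {
        // get() blocks until the standalone process exits
        future.get(timeoutSeconds, TimeUnit.SECONDS);
    } finally {
        if (!future.isDone()) {
            future.cancel(true); // kill the run if it overran the timeout
        }
    }
}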
Use of com.ibm.streamsx.topology.context.StreamsContext in project streamsx.kafka by IBMStreams.
From class KafkaOperatorsDoubleTypeTest, method kafkaDoubleTypeTest:
@Test
public void kafkaDoubleTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.DOUBLE_SCHEMA;

    // create the producer (produces tuples after a short delay)
    TStream<Double> srcStream = topo.strings(DATA).transform(s -> Double.valueOf(s)).modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> String.valueOf(t.getDouble("message"))));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered(msgStream.toStringStream(), DATA);
    HashMap<String, Object> config = new HashMap<>();
    // config.put(ContextProperties.KEEP_ARTIFACTS, Boolean.TRUE);
    // config.put(ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
    tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(stringContentsUnordered.valid());
    Assert.assertEquals(DATA.length, stringContentsUnordered.getResult().size());
}
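The Converter passed to SPLStreams.convertStream is not shown in this excerpt. convertStream expects a serializable BiFunction that copies each Java value into an output tuple matching the given schema, so, assuming DOUBLE_SCHEMA has a single float64 attribute named "message" (consistent with the getDouble("message") call above), the converter could be as simple as this sketch:

import com.ibm.streams.operator.OutputTuple;
import com.ibm.streamsx.topology.function.BiFunction;

// Hypothetical reconstruction: fills the "message" attribute of DOUBLE_SCHEMA.
// The attribute name is an assumption based on the consumer side above.
private static class Converter implements BiFunction<Double, OutputTuple, OutputTuple> {
    private static final long serialVersionUID = 1L;

    @Override
    public OutputTuple apply(Double value, OutputTuple outTuple) {
        outTuple.setDouble("message", value);
        return outTuple;
    }
}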
Use of com.ibm.streamsx.topology.context.StreamsContext in project streamsx.kafka by IBMStreams.
From class KafkaOperatorsGreenThread, method kafkaGreenThread:
@Test
public void kafkaGreenThread() throws Exception {
    Topology topo = getTopology();

    // create the producer (produces tuples after a short delay)
    TStream<String> stringSrcStream = topo.strings(Constants.STRING_DATA).modify(new Delay<>(5000));
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> t.getString("message")));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered(msgStream.toStringStream(), Constants.STRING_DATA);
    HashMap<String, Object> config = new HashMap<>();
    // config.put(ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
    // config.put(ContextProperties.KEEP_ARTIFACTS, Boolean.TRUE);
    tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(stringContentsUnordered.valid());
    Assert.assertEquals(Constants.STRING_DATA.length, stringContentsUnordered.getResult().size());
}
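getKafkaParams() is a helper from the shared test base class and is not shown in this excerpt. Judging from the explicit producerProps and consumerParams maps in kafkaStartPositionTest above, it presumably builds the same two-entry map. The following reconstruction is a guess, and the constant name Constants.TOPIC in particular is assumed:

import java.util.HashMap;
import java.util.Map;

// Hypothetical reconstruction of the base-class helper; the topic constant
// is an assumption inferred from the explicit parameter maps above.
protected Map<String, Object> getKafkaParams() {
    Map<String, Object> params = new HashMap<>();
    params.put("topic", Constants.TOPIC);
    params.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    return params;
}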
Use of com.ibm.streamsx.topology.context.StreamsContext in project streamsx.kafka by IBMStreams.
From class KafkaOperatorsLongTypeTest, method kafkaLongTypeTest:
@Test
public void kafkaLongTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.LONG_SCHEMA;

    // create the producer (produces tuples after a short delay)
    TStream<Long> srcStream = topo.strings(DATA).transform(s -> Long.valueOf(s)).modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> String.valueOf(t.getLong("message"))));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered(msgStream.toStringStream(), DATA);
    HashMap<String, Object> config = new HashMap<>();
    // config.put(ContextProperties.KEEP_ARTIFACTS, Boolean.TRUE);
    // config.put(ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
    tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(stringContentsUnordered.valid());
    Assert.assertEquals(DATA.length, stringContentsUnordered.getResult().size());
}
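The two commented-out config entries recur in every test here. When debugging, they can be enabled with the ContextProperties constants from com.ibm.streamsx.topology.context, for example:

import java.util.HashMap;
import com.ibm.streamsx.topology.context.ContextProperties;

HashMap<String, Object> config = new HashMap<>();
// keep the generated toolkit and bundle on disk for inspection
config.put(ContextProperties.KEEP_ARTIFACTS, Boolean.TRUE);
// raise the trace level of the submitted job
config.put(ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);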
Use of com.ibm.streamsx.topology.context.StreamsContext in project streamsx.kafka by IBMStreams.
From class KafkaProducerFanOutTest, method kafkaFanOutTest:
@Test
public void kafkaFanOutTest() throws Exception {
    Topology topo = getTopology();

    // create the producers (they produce tuples after a short delay)
    TStream<String> stringSrcStream = topo.strings(Constants.STRING_DATA).modify(new Delay<>(5000)).lowLatency();
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), getKafkaParams());
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(t -> t.getString("message")));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();

    // both producers send the same data, so each entry appears twice
    String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 2);
    Condition<List<String>> stringContentsUnordered = tester.stringContentsUnordered(msgStream.toStringStream(), expectedArr);
    HashMap<String, Object> config = new HashMap<>();
    // config.put(ContextProperties.TRACING_LEVEL, java.util.logging.Level.FINE);
    // config.put(ContextProperties.KEEP_ARTIFACTS, Boolean.TRUE);
    tester.complete(context, config, stringContentsUnordered, 60, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(stringContentsUnordered.valid());
    Assert.assertEquals(Constants.STRING_DATA.length * 2, stringContentsUnordered.getResult().size());
}
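Because both producers write the identical data set, the expected output contains every string twice. KafkaSPLStreamsUtils.duplicateArrayEntries is a project utility not shown in this excerpt; judging from its use here, its behavior amounts to something like this sketch (ordering is irrelevant because the condition is unordered):

// Hypothetical sketch of the utility's behavior: repeat the whole array
// 'copies' times.
static String[] duplicateArrayEntries(String[] data, int copies) {
    String[] result = new String[data.length * copies];
    for (int c = 0; c < copies; c++) {
        System.arraycopy(data, 0, result, c * data.length, data.length);
    }
    return result;
}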