Example use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.kafka (IBMStreams), from the method kafkaIntTypeTest of class KafkaOperatorsIntTypeTest.
@Test
public void kafkaIntTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.INT_SCHEMA;

    // Producer side: parse the string test data into integers and hold the
    // tuples back for 5s so the consumer is running before anything is published.
    TStream<Integer> producerInput = topo.strings(DATA)
            .transform(Integer::valueOf)
            .modify(new Delay<>(5000));
    SPLStream producerSplInput = SPLStreams.convertStream(producerInput, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, producerSplInput, getKafkaParams());

    // Consumer side: read the tuples back and project the "message" attribute
    // onto a plain string stream for comparison with the input data.
    SPLStream consumed = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream messages = SPLStreams.stringToSPLStream(
            consumed.convert(t -> String.valueOf(t.getInt("message"))));

    // Run the topology distributed and check that every produced value arrives
    // at the consumer, ignoring ordering.
    Tester tester = topo.getTester();
    Condition<List<String>> expected = KafkaSPLStreamsUtils.stringContentsUnordered(tester, messages, DATA);
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    tester.complete(context, new HashMap<>(), expected, 30, TimeUnit.SECONDS);

    Assert.assertTrue(expected.getResult().size() > 0);
    Assert.assertTrue(expected.getResult().toString(), expected.valid());
}
Example use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.kafka (IBMStreams), from the method kafkaNoKeyTest of class KafkaOperatorsNoKey.
@Test
public void kafkaNoKeyTest() throws Exception {
    Topology topo = getTopology();

    // Producer side: publish the string test data without a message key;
    // the 5s delay lets the consumer come up before tuples are sent.
    TStream<String> producerInput = topo.strings(Constants.STRING_DATA).modify(new Delay<>(5000));
    SPLStream kafkaTuples = KafkaSPLStreamsUtils.convertStreamToKafkaTuple(producerInput, false);
    SPL.invokeSink(Constants.KafkaProducerOp, kafkaTuples, getKafkaParams());

    // Consumer side: subscribe with the key-less schema and extract the
    // "message" attribute as a plain string stream.
    SPLStream consumed = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(),
            KafkaSPLStreamsUtils.STRING_NOKEY_SCHEMA);
    SPLStream messages = SPLStreams.stringToSPLStream(consumed.convert(t -> t.getString("message")));

    // Run distributed and verify every produced string is consumed, in any order.
    Tester tester = topo.getTester();
    Condition<List<String>> expected =
            KafkaSPLStreamsUtils.stringContentsUnordered(tester, messages, Constants.STRING_DATA);
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    tester.complete(context, new HashMap<>(), expected, 30, TimeUnit.SECONDS);

    Assert.assertTrue(expected.getResult().size() > 0);
    Assert.assertTrue(expected.getResult().toString(), expected.valid());
}
Example use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.kafka (IBMStreams), from the method kafkaFanOutTest of class KafkaProducerFanOutTest.
@Test
public void kafkaFanOutTest() throws Exception {
    Topology topo = getTopology();

    // Producer side: fan the same delayed source stream out to TWO producer
    // operators, so every test string is published twice. lowLatency() keeps
    // the fan-out fused with the source.
    TStream<String> producerInput =
            topo.strings(Constants.STRING_DATA).modify(new Delay<>(5000)).lowLatency();
    SPL.invokeSink(Constants.KafkaProducerOp,
            KafkaSPLStreamsUtils.convertStreamToKafkaTuple(producerInput), getKafkaParams());
    SPL.invokeSink(Constants.KafkaProducerOp,
            KafkaSPLStreamsUtils.convertStreamToKafkaTuple(producerInput), getKafkaParams());

    // Consumer side: a single consumer receives both copies; project the
    // "message" attribute onto a plain string stream.
    SPLStream consumed = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(),
            KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream messages = SPLStreams.stringToSPLStream(consumed.convert(t -> t.getString("message")));

    // Both producers send identical data, so the expected result set contains
    // each entry exactly twice.
    String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 2);
    Tester tester = topo.getTester();
    Condition<List<String>> expected =
            KafkaSPLStreamsUtils.stringContentsUnordered(tester, messages, expectedArr);
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    tester.complete(context, new HashMap<>(), expected, 30, TimeUnit.SECONDS);

    Assert.assertTrue(expected.getResult().size() > 0);
    Assert.assertTrue(expected.getResult().toString(), expected.valid());
}
Example use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.kafka (IBMStreams), from the method kafkaProducerPartitionAttrTest of class KafkaProducerPartitionAttrTest.
@Test
public void kafkaProducerPartitionAttrTest() throws Exception {
    Topology topo = getTopology();

    // Ship the custom-partitioner config and jar with the application bundle
    // so the producer operator can load them at runtime.
    topo.addFileDependency("etc/custom_partitioner.properties", "etc");
    topo.addFileDependency("etc/custompartitioner.jar", "etc");

    // Producer side: nine keyed messages, delayed so the consumer is up first.
    TStream<Message<Integer, String>> producerInput =
            topo.limitedSource(new MySupplier(), 9).modify(new Delay<>(Constants.PRODUCER_DELAY));
    SPLStream producerSplInput =
            SPLStreams.convertStream(producerInput, new MessageConverter(), PRODUCER_SCHEMA);
    SPL.invokeSink(Constants.KafkaProducerOp, producerSplInput, getKafkaProducerParams());

    // Consumer side: read the target partition only and project the "message"
    // attribute onto a plain string stream.
    SPLStream consumed = createConsumer(topo, PARTITION_NUM);
    TStream<String> messageText = consumed.transform(t -> t.getString("message"));
    SPLStream messages = SPLStreams.stringToSPLStream(messageText);

    // All nine messages are expected on the consumed partition, in any order.
    String[] expectedArr = { "A0", "B1", "C2", "A3", "B4", "C5", "A6", "B7", "C8" };
    Tester tester = topo.getTester();
    Condition<List<String>> expected =
            KafkaSPLStreamsUtils.stringContentsUnordered(tester, messages, expectedArr);
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    tester.complete(context, new HashMap<>(), expected, 30, TimeUnit.SECONDS);

    Assert.assertTrue(expected.getResult().size() > 0);
    Assert.assertTrue(expected.getResult().toString(), expected.valid());
}
Example use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.topology (IBMStreams), from the method getJSON of class HTTPStreams.
/**
 * Declares a source stream that polls a web service with HTTP {@code GET}
 * at a fixed period, expecting {@code application/json} content. Each
 * successful request produces exactly one tuple on the returned stream,
 * holding the full JSON body of the response.
 *
 * @param te
 *            Topology the source stream will be contained in.
 * @param url
 *            URL to poll.
 * @param period
 *            Polling period.
 * @param unit
 *            Unit for {@code period}.
 * @return Stream that will contain the JSON tuples from periodic HTTP
 *         {@code GET} requests.
 */
public static TStream<JSONObject> getJSON(TopologyElement te, String url, long period, TimeUnit unit) {
    // The underlying SPL operator expects the period in (possibly fractional) seconds.
    double periodSeconds = unit.toMillis(period) / 1000.0;

    Map<String, Object> params = new HashMap<>();
    params.put("url", url);
    params.put("period", periodSeconds);

    // Invoke the HTTP source operator, then deserialize its serialized-JSON
    // string output into JSONObject tuples.
    SPLStream rawJson = SPL.invokeSource(te,
            "com.ibm.streamsx.inet.http::HTTPGetJSONContent", params, JSONSchemas.JSON);
    return JSONStreams.deserialize(SPLStreams.toStringStream(rawJson));
}
Aggregations