Use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.health by IBMStreams.
The publish method of the JsonPublisher class:
public static void publish(TStream<String> jsonInputStream, String topic) {
    // Convert the stream of JSON strings to an SPLStream with the standard
    // JSON schema, then publish it under the given topic.
    SPLStream splStream = SPLStreams.convertStream(jsonInputStream, new JsonToSpl(), JSONSchemas.JSON);
    splStream.publish(topic);
}
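The JsonToSpl converter is referenced but not shown above. A minimal sketch of what such a converter could look like, assuming JSONSchemas.JSON defines a single rstring attribute named jsonString (the attribute name is an assumption, not taken from this snippet):

// Hedged sketch, not the project's actual implementation.
public class JsonToSpl implements BiFunction<String, OutputTuple, OutputTuple> {
    private static final long serialVersionUID = 1L;
    @Override
    public OutputTuple apply(String json, OutputTuple outTuple) {
        // "jsonString" is the attribute name assumed for the JSON schema.
        outTuple.setString("jsonString", json);
        return outTuple;
    }
}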
Use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.health by IBMStreams.
The run method of the OruR01Ingest class:
@Override
public void run() {
    Topology topology = new Topology("OruR01Ingest");
    ObxToSplMapper mapper = new ObxToSplMapper();
    addDependencies(topology);
    // Ingest HL7 messages from the HAPI listener on the configured port.
    TStream<Message> messages = topology.endlessSource(new HapiMessageSupplier(getPort()));
    // Transform each message into zero or more Observation objects.
    TStream<Observation> observationStream = messages.multiTransform(message -> {
        return mapper.messageToModel(message);
    });
    // Convert the Observation stream to an SPLStream with the Observation SPL schema.
    StreamSchema schema = Type.Factory.getStreamSchema(Observation.OBSERVATION_SCHEMA_SPL);
    @SuppressWarnings("serial")
    SPLStream splObservations = SPLStreams.convertStream(observationStream,
            new BiFunction<Observation, OutputTuple, OutputTuple>() {
                @Override
                public OutputTuple apply(Observation observation, OutputTuple outTuple) {
                    return mapper.modelToSpl(observation, outTuple);
                }
            }, schema);
    splObservations.print();
    splObservations.publish(getTopic());
    try {
        StreamsContextFactory.getStreamsContext(StreamsContext.Type.DISTRIBUTED).submit(topology);
    } catch (Exception e) {
        TRACE.error("Unable to submit topology", e);
    }
}
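For context, a minimal sketch of how a downstream topology could consume what this method publishes, assuming the subscriber uses the same Observation SPL schema (the topic string here is a placeholder, not the value returned by getTopic()):

Topology consumer = new Topology("ObservationConsumer");
// Subscribe with a schema matching the published stream; "observations" is a placeholder topic.
StreamSchema schema = Type.Factory.getStreamSchema(Observation.OBSERVATION_SCHEMA_SPL);
SPLStream observations = SPLStreams.subscribe(consumer, "observations", schema);
observations.print();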
Use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.health by IBMStreams.
The subscribe method of the JsonSubscriber class:
public static TStream<String> subscribe(Topology topo, String topic) {
    // Subscribe to the topic as an SPLStream of JSON tuples, then convert
    // each tuple back to a plain JSON string.
    SPLStream splStream = SPLStreams.subscribe(topo, topic, JSONSchemas.JSON);
    TStream<String> stringStream = splStream.transform(new SplToTStream());
    return stringStream;
}
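SplToTStream is the inverse of the JsonToSpl converter above. A hedged sketch of such a function, again assuming the JSON schema's attribute is named jsonString:

// Hedged sketch, not the project's actual implementation.
public class SplToTStream implements Function<Tuple, String> {
    private static final long serialVersionUID = 1L;
    @Override
    public String apply(Tuple tuple) {
        // Extract the assumed "jsonString" attribute from each SPL tuple.
        return tuple.getString("jsonString");
    }
}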
Use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.kafka by IBMStreams.
The kafkaFanInTest method of the KafkaConsumerFanInTest class:
@Test
public void kafkaFanInTest() throws Exception {
    Topology topo = getTopology();
    // Create the producer (produces tuples after a short delay).
    TStream<String> stringSrcStream = topo.strings(Constants.STRING_DATA).modify(new Delay<>(5000));
    SPL.invokeSink(Constants.KafkaProducerOp, KafkaSPLStreamsUtils.convertStreamToKafkaTuple(stringSrcStream), getKafkaParams());
    // Create two consumers and fan their output into a single stream.
    SPLStream consumerStream1 = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getConsumerParams(1), KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream consumerStream2 = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getConsumerParams(2), KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream unionStream = KafkaSPLStreamsUtils.union(Arrays.asList(consumerStream1, consumerStream2), KafkaSPLStreamsUtils.STRING_SCHEMA);
    SPLStream msgStream = SPLStreams.stringToSPLStream(unionStream.convert(t -> t.getString("message")));
    // Test the output of the consumers.
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    // Both consumers consume the same data, so each expected result is duplicated.
    String[] expectedArr = KafkaSPLStreamsUtils.duplicateArrayEntries(Constants.STRING_DATA, 2);
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, expectedArr);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);
    // Check the results.
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
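The getConsumerParams helper is not shown. A plausible sketch under stated assumptions: both consumers share the properties file and topic, and the numeric argument is assumed to yield distinct Kafka consumer group IDs, which is why each consumer receives every message and the expected results are duplicated:

// Hedged sketch; the propertiesFile and topic parameter names appear in the
// companion test below, but the groupId naming scheme is an assumption.
private Map<String, Object> getConsumerParams(int consumerId) {
    Map<String, Object> params = new HashMap<>();
    params.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    params.put("topic", Constants.TOPIC_TEST);
    params.put("groupId", "fanInGroup" + consumerId);
    return params;
}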
Use of com.ibm.streamsx.topology.spl.SPLStream in project streamsx.kafka by IBMStreams.
The kafkaAttrNameParamsTest method of the KafkaOperatorsAttrNameParamsTest class:
@Test
public void kafkaAttrNameParamsTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema producerSchema = com.ibm.streams.operator.Type.Factory.getStreamSchema(
            "tuple<int32 " + PROD_KEY_ATTR_NAME + ", rstring " + PROD_MSG_ATTR_NAME
            + ", rstring " + PROD_TOPIC_ATTR_NAME + ", int32 " + PROD_PARTITION_ATTR_NAME + ">");
    // Create the producer (produces tuples after a short delay), mapping each
    // schema attribute explicitly via the *Attribute operator parameters.
    Map<String, Object> producerProps = new HashMap<>();
    producerProps.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    producerProps.put("messageAttribute", producerSchema.getAttribute(PROD_MSG_ATTR_NAME));
    producerProps.put("keyAttribute", producerSchema.getAttribute(PROD_KEY_ATTR_NAME));
    producerProps.put("topicAttribute", producerSchema.getAttribute(PROD_TOPIC_ATTR_NAME));
    producerProps.put("partitionAttribute", producerSchema.getAttribute(PROD_PARTITION_ATTR_NAME));
    TStream<String> srcStream = topo.strings(MSG).modify(new Delay<>(5000));
    SPL.invokeSink(Constants.KafkaProducerOp, SPLStreams.convertStream(srcStream, new ProducerConverter(), producerSchema), producerProps);
    // Create the consumer, naming its output attributes explicitly.
    StreamSchema consumerSchema = com.ibm.streams.operator.Type.Factory.getStreamSchema(
            "tuple<int32 " + CONS_KEY_ATTR_NAME + ", rstring " + CONS_MSG_ATTR_NAME + ", rstring " + CONS_TOPIC_ATTR_NAME + ">");
    Map<String, Object> consumerProps = new HashMap<>();
    consumerProps.put("propertiesFile", Constants.PROPERTIES_FILE_PATH);
    consumerProps.put("outputMessageAttributeName", CONS_MSG_ATTR_NAME);
    consumerProps.put("outputKeyAttributeName", CONS_KEY_ATTR_NAME);
    consumerProps.put("outputTopicAttributeName", CONS_TOPIC_ATTR_NAME);
    consumerProps.put("topic", Constants.TOPIC_TEST);
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, consumerProps, consumerSchema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(consumerStream.convert(
            t -> t.getString(CONS_TOPIC_ATTR_NAME) + ":" + t.getInt(CONS_KEY_ATTR_NAME) + ":" + t.getString(CONS_MSG_ATTR_NAME)));
    // Test the output of the consumer.
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, Constants.TOPIC_TEST + ":" + KEY + ":" + MSG);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);
    // Check the results.
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
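The ProducerConverter referenced above is not shown. A hedged sketch of such a BiFunction, filling the four producer-schema attributes for each input string (the partition value 0 and the use of the KEY constant are assumptions):

// Hedged sketch, not the project's actual implementation.
public class ProducerConverter implements BiFunction<String, OutputTuple, OutputTuple> {
    private static final long serialVersionUID = 1L;
    @Override
    public OutputTuple apply(String msg, OutputTuple outTuple) {
        outTuple.setInt(PROD_KEY_ATTR_NAME, KEY);                        // key attribute (assumed constant)
        outTuple.setString(PROD_MSG_ATTR_NAME, msg);                     // message payload
        outTuple.setString(PROD_TOPIC_ATTR_NAME, Constants.TOPIC_TEST);  // target topic
        outTuple.setInt(PROD_PARTITION_ATTR_NAME, 0);                    // partition (assumed)
        return outTuple;
    }
}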