Use of com.ibm.streams.operator.OutputTuple in project streamsx.topology by IBMStreams.
The class TextFileReader, method process.
@Override
public void process(StreamingInput<Tuple> stream, Tuple tuple) throws Exception {
    final StreamingOutput<OutputTuple> out = getOutput(0);
    String fileName = tuple.getString(0);
    File file = new File(fileName);
    if (!file.isAbsolute()) {
        file = new File(getOperatorContext().getPE().getDataDirectory(), fileName);
    }
    FileInputStream fis = new FileInputStream(file);
    try {
        BufferedReader br = new BufferedReader(new InputStreamReader(fis, charset), 128 * 1024);
        for (;;) {
            String line = br.readLine();
            if (line == null)
                break;
            out.submitAsTuple(new RString(line));
        }
        br.close();
    } finally {
        fis.close();
    }
}
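Note that if readLine() throws, the BufferedReader is never closed (only the underlying FileInputStream is, via the finally block). A minimal sketch of the same read loop using try-with-resources, assuming the enclosing operator class with its charset field and the same out port:

// Sketch only: equivalent read loop with try-with-resources (Java 7+),
// assuming the enclosing operator's 'charset' field and 'out' port.
try (BufferedReader br = new BufferedReader(
        new InputStreamReader(new FileInputStream(file), charset), 128 * 1024)) {
    String line;
    while ((line = br.readLine()) != null) {
        out.submitAsTuple(new RString(line));
    }
}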
Use of com.ibm.streams.operator.OutputTuple in project streamsx.health by IBMStreams.
The class OruR01Ingest, method run.
@Override
public void run() {
    Topology topology = new Topology("OruR01Ingest");
    ObxToSplMapper mapper = new ObxToSplMapper();
    addDependencies(topology);
    TStream<Message> messages = topology.endlessSource(new HapiMessageSupplier(getPort()));
    // transform message to Observation object
    TStream<Observation> observationStream = messages.multiTransform(message -> {
        return mapper.messageToModel(message);
    });
    StreamSchema schema = Type.Factory.getStreamSchema(Observation.OBSERVATION_SCHEMA_SPL);
    @SuppressWarnings("serial")
    SPLStream splObservations = SPLStreams.convertStream(observationStream,
            new BiFunction<Observation, OutputTuple, OutputTuple>() {

                @Override
                public OutputTuple apply(Observation observation, OutputTuple outTuple) {
                    return mapper.modelToSpl(observation, outTuple);
                }
            }, schema);
    splObservations.print();
    splObservations.publish(getTopic());
    try {
        StreamsContextFactory.getStreamsContext(StreamsContext.Type.DISTRIBUTED).submit(topology);
    } catch (Exception e) {
        TRACE.error("Unable to submit topology", e);
    }
}
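Because com.ibm.streamsx.topology.function.BiFunction is a serializable functional interface, the anonymous class above can be collapsed into a lambda. A minimal sketch, assuming the same mapper, observationStream, and schema variables (mapper must be serializable, which the anonymous-class version already requires):

// Sketch: the same conversion written as a lambda; the
// @SuppressWarnings("serial") annotation is no longer needed.
SPLStream splObservations = SPLStreams.convertStream(observationStream,
        (observation, outTuple) -> mapper.modelToSpl(observation, outTuple), schema);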
Use of com.ibm.streams.operator.OutputTuple in project streamsx.topology by IBMStreams.
The class PythonFunctionalOperatorsTest, method testTupleStream.
public static SPLStream testTupleStream(Topology topology, boolean withSets) {
    TStream<Long> beacon = BeaconStreams.longBeacon(topology, TUPLE_COUNT);
    SPLStream tuples = SPLStreams.convertStream(beacon, new BiFunction<Long, OutputTuple, OutputTuple>() {

        private static final long serialVersionUID = 1L;

        // The function is serialized when the topology is submitted; TupleType
        // and Random are marked transient and created lazily on first use.
        private transient TupleType type;

        private transient Random rand;

        @Override
        public OutputTuple apply(Long v1, OutputTuple v2) {
            if (type == null) {
                type = Type.Factory.getTupleType(getPythonTypesSchema(withSets).getLanguageType());
                rand = new Random();
            }
            Tuple randTuple = (Tuple) type.randomValue(rand);
            v2.assign(randTuple);
            return v2;
        }
    }, getPythonTypesSchema(withSets));
    return tuples;
}
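A hypothetical caller, shown only to illustrate how the helper composes with a Topology (the topology name is illustrative and the submission step is omitted):

// Hypothetical usage sketch: build the random-tuple SPL stream and print it.
Topology topology = new Topology("PythonTypesSample");
SPLStream tuples = PythonFunctionalOperatorsTest.testTupleStream(topology, false);
tuples.print();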
Use of com.ibm.streams.operator.OutputTuple in project streamsx.kafka by IBMStreams.
The class AbstractKafkaConsumerOperator, method submitRecord.
private void submitRecord(ConsumerRecord<?, ?> record) throws Exception {
    if (logger.isTraceEnabled())
        logger.trace("Preparing to submit record: " + record); //$NON-NLS-1$
    // If the record value is null, we drop the record and increment the metric 'nMalformedMessages'.
    if (record.value() == null) {
        logger.warn("dropping message with malformed value from topic = " + record.topic() + ", partition = " + record.partition() + ", offset = " + record.offset());
        nMalformedMessages.increment();
        return;
    }
    final StreamingOutput<OutputTuple> out = getOutput(0);
    OutputTuple tuple = out.newTuple();
    setTuple(tuple, outputMessageAttrName, record.value());
    if (hasOutputKey) {
        // If record.key() is null, we have no evidence that this was really caused by a malformed key.
        // It can also be an un-keyed message, so dropping the message seems inappropriate in this case.
        //
        // key = null would be mapped to
        // * empty rstring
        // * 0 for Integer, or float64
        //
        // in the key attribute of the outgoing tuple.
        // if (record.key() == null) {
        //     logger.warn("dropping message with malformed key from topic = "
        //             + record.topic() + ", partition = " + record.partition() + ", offset = " + record.offset());
        //     nMalformedMessages.increment();
        //     return;
        // }
        setTuple(tuple, outputKeyAttrName, record.key());
    }
    if (hasOutputTopic) {
        tuple.setString(outputTopicAttrName, record.topic());
    }
    if (hasOutputOffset) {
        tuple.setLong(outputOffsetAttrName, record.offset());
    }
    if (hasOutputPartition) {
        tuple.setInt(outputPartitionAttrName, record.partition());
    }
    if (hasOutputTimetamp) {
        tuple.setLong(outputMessageTimestampAttrName, record.timestamp());
    }
    if (logger.isDebugEnabled())
        logger.debug("Submitting tuple: " + tuple); //$NON-NLS-1$
    out.submit(tuple);
}
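The attribute-setting calls follow the standard Java Operator API submit pattern: obtain an OutputTuple from the output port, populate attributes by name, then submit. A minimal standalone sketch of that pattern, with purely illustrative attribute names (setTuple above is the operator's own helper and is not shown here):

// Sketch of the generic submit pattern, assuming an output schema such as
// tuple<rstring message, int64 messageOffset> (names are illustrative only).
final StreamingOutput<OutputTuple> out = getOutput(0);
OutputTuple tuple = out.newTuple();
tuple.setString("message", "hello");  // rstring attribute
tuple.setLong("messageOffset", 42L);  // int64 attribute
out.submit(tuple);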