
Example 6 with OutputTuple

Use of com.ibm.streams.operator.OutputTuple in the project streamsx.topology by IBMStreams.

The class TextFileReader, method process.

@Override
public void process(StreamingInput<Tuple> stream, Tuple tuple) throws Exception {
    final StreamingOutput<OutputTuple> out = getOutput(0);
    String fileName = tuple.getString(0);
    File file = new File(fileName);
    // Resolve relative file names against the PE's data directory.
    if (!file.isAbsolute()) {
        file = new File(getOperatorContext().getPE().getDataDirectory(), fileName);
    }
    FileInputStream fis = new FileInputStream(file);
    try {
        BufferedReader br = new BufferedReader(new InputStreamReader(fis, charset), 128 * 1024);
        // Read the file line by line and submit each line as a single rstring attribute tuple.
        for (;;) {
            String line = br.readLine();
            if (line == null)
                break;
            out.submitAsTuple(new RString(line));
        }
        br.close();
    } finally {
        fis.close();
    }
}
Also used: InputStreamReader(java.io.InputStreamReader) BufferedReader(java.io.BufferedReader) RString(com.ibm.streams.operator.types.RString) OutputTuple(com.ibm.streams.operator.OutputTuple) File(java.io.File) FileInputStream(java.io.FileInputStream)
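
A note on the submission call: submitAsTuple builds and submits an output tuple directly from the supplied attribute values, so the example assumes the output port schema has a single rstring attribute. Below is a minimal sketch of the equivalent submission using an explicitly constructed OutputTuple; this is not code from the project, and the helper name is illustrative.

// Sketch only, assuming an output port whose schema is a single rstring attribute
// (for example tuple<rstring line>); uses the same operator API classes as above.
private void submitLine(StreamingOutput<OutputTuple> out, String line) throws Exception {
    OutputTuple outTuple = out.newTuple();  // mutable tuple matching the port schema
    outTuple.setString(0, line);            // set the first (rstring) attribute
    out.submit(outTuple);                   // send the tuple downstream
}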

Example 7 with OutputTuple

Use of com.ibm.streams.operator.OutputTuple in the project streamsx.health by IBMStreams.

The class OruR01Ingest, method run.

@Override
public void run() {
    Topology topology = new Topology("OruR01Ingest");
    ObxToSplMapper mapper = new ObxToSplMapper();
    addDependencies(topology);
    TStream<Message> messages = topology.endlessSource(new HapiMessageSupplier(getPort()));
    // Transform each HL7 message into zero or more Observation objects (multiTransform allows one-to-many).
    TStream<Observation> observationStream = messages.multiTransform(message -> {
        return mapper.messageToModel(message);
    });
    StreamSchema schema = Type.Factory.getStreamSchema(Observation.OBSERVATION_SCHEMA_SPL);
    // Convert the Observation stream into an SPLStream using the Observation SPL schema.
    @SuppressWarnings("serial")
    SPLStream splObservations = SPLStreams.convertStream(observationStream, new BiFunction<Observation, OutputTuple, OutputTuple>() {

        @Override
        public OutputTuple apply(Observation observation, OutputTuple outTuple) {
            return mapper.modelToSpl(observation, outTuple);
        }
    }, schema);
    splObservations.print();
    // Publish the stream so other Streams applications can subscribe to it by topic.
    splObservations.publish(getTopic());
    try {
        StreamsContextFactory.getStreamsContext(StreamsContext.Type.DISTRIBUTED).submit(topology);
    } catch (Exception e) {
        TRACE.error("Unable to submit topology", e);
    }
}
Also used: Message(ca.uhn.hl7v2.model.Message) Topology(com.ibm.streamsx.topology.Topology) HapiMessageSupplier(com.ibm.streamsx.health.hapi.internal.HapiMessageSupplier) StreamSchema(com.ibm.streams.operator.StreamSchema) OutputTuple(com.ibm.streams.operator.OutputTuple) SPLStream(com.ibm.streamsx.topology.spl.SPLStream) ObxToSplMapper(com.ibm.streamsx.health.hapi.mapper.ObxToSplMapper) Observation(com.ibm.streamsx.health.hapi.model.Observation)
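
The attribute mapping itself happens inside ObxToSplMapper.modelToSpl, which is not part of this excerpt. Since the example publishes the converted stream, a natural counterpart is a subscriber; the sketch below is illustrative only: the topology name and topic string are placeholders, and it assumes the consuming application uses the same Observation schema.

// Sketch of a separate consuming topology; "<topic>" stands in for whatever getTopic()
// returns in OruR01Ingest. Uses com.ibm.streamsx.topology.spl.SPLStreams and
// com.ibm.streams.operator.Type in addition to the classes listed above.
public void runConsumerSketch() {
    Topology consumer = new Topology("OruR01ConsumerSketch");
    StreamSchema schema = Type.Factory.getStreamSchema(Observation.OBSERVATION_SCHEMA_SPL);
    // Subscribe to the topic that OruR01Ingest publishes to.
    SPLStream observations = SPLStreams.subscribe(consumer, "<topic>", schema);
    observations.print();
}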

Example 8 with OutputTuple

Use of com.ibm.streams.operator.OutputTuple in the project streamsx.topology by IBMStreams.

The class PythonFunctionalOperatorsTest, method testTupleStream.

public static SPLStream testTupleStream(Topology topology, boolean withSets) {
    TStream<Long> beacon = BeaconStreams.longBeacon(topology, TUPLE_COUNT);
    SPLStream tuples = SPLStreams.convertStream(beacon, new BiFunction<Long, OutputTuple, OutputTuple>() {

        private static final long serialVersionUID = 1L;

        // Lazily created on first use; transient so the anonymous function can be serialized to the Streams runtime.
        private transient TupleType type;

        private transient Random rand;

        @Override
        public OutputTuple apply(Long v1, OutputTuple v2) {
            if (type == null) {
                type = Type.Factory.getTupleType(getPythonTypesSchema(withSets).getLanguageType());
                rand = new Random();
            }
            // Generate a random tuple matching the test schema and copy its attributes into the output tuple.
            Tuple randTuple = (Tuple) type.randomValue(rand);
            v2.assign(randTuple);
            return v2;
        }
    }, getPythonTypesSchema(withSets));
    return tuples;
}
Also used: Random(java.util.Random) TupleType(com.ibm.streams.operator.meta.TupleType) OutputTuple(com.ibm.streams.operator.OutputTuple) SPLStream(com.ibm.streamsx.topology.spl.SPLStream) Tuple(com.ibm.streams.operator.Tuple)
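
For context, a minimal sketch of how this helper might be driven from a test; submission and assertions are omitted and the topology name is illustrative.

// Sketch only: build a topology, obtain the stream of random SPL tuples, and print it.
Topology topology = new Topology("PythonTypesSketch");
SPLStream randomTuples = testTupleStream(topology, false /* withSets */);
randomTuples.print();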

Example 9 with OutputTuple

Use of com.ibm.streams.operator.OutputTuple in the project streamsx.kafka by IBMStreams.

The class AbstractKafkaConsumerOperator, method submitRecord.

private void submitRecord(ConsumerRecord<?, ?> record) throws Exception {
    if (logger.isTraceEnabled())
        logger.trace("Preparing to submit record: " + record); // $NON-NLS-1$
    // A record with a null value is treated as malformed: drop it and increment the 'nMalformedMessages' metric.
    if (record.value() == null) {
        logger.warn("dropping message with malformed value from topic = " + record.topic() + ", partition = " + record.partition() + ", offset = " + record.offset());
        nMalformedMessages.increment();
        return;
    }
    final StreamingOutput<OutputTuple> out = getOutput(0);
    OutputTuple tuple = out.newTuple();
    setTuple(tuple, outputMessageAttrName, record.value());
    if (hasOutputKey) {
        // If record.key() is null, we have no evidence that this was really caused by a malformed key.
        // It could also simply be an unkeyed message, so dropping the record does not seem appropriate in this case.
        // 
        // key = null would be mapped to
        // * empty rstring
        // * 0 for Integer, or float64
        // 
        // in the key attribute of the outgoing tuple.
        // if (record.key() == null) {
        // logger.warn("dropping message with malformed key from topic = "
        // + record.topic() + ", partition = " + record.partition() + ", offset = " + record.offset());
        // nMalformedMessages.increment();
        // return;
        // }
        setTuple(tuple, outputKeyAttrName, record.key());
    }
    if (hasOutputTopic) {
        tuple.setString(outputTopicAttrName, record.topic());
    }
    if (hasOutputOffset) {
        tuple.setLong(outputOffsetAttrName, record.offset());
    }
    if (hasOutputPartition) {
        tuple.setInt(outputPartitionAttrName, record.partition());
    }
    if (hasOutputTimetamp) {
        tuple.setLong(outputMessageTimestampAttrName, record.timestamp());
    }
    if (logger.isDebugEnabled())
        logger.debug("Submitting tuple: " + tuple); // $NON-NLS-1$
    out.submit(tuple);
}
Also used: OutputTuple(com.ibm.streams.operator.OutputTuple)
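
The setTuple helper referenced above belongs to the operator but is not included in this excerpt. Purely as a hypothetical sketch of what such a helper can look like (the project's real implementation may differ), the method below dispatches on the attribute's SPL meta type; it additionally uses com.ibm.streams.operator.Type and com.ibm.streams.operator.types.ValueFactory.

// Hypothetical sketch, not the streamsx.kafka implementation: set a tuple attribute
// from a Kafka key or value whose Java type depends on the configured deserializer.
private void setTupleSketch(OutputTuple tuple, String attrName, Object value) {
    Type.MetaType metaType = tuple.getStreamSchema().getAttribute(attrName).getType().getMetaType();
    switch (metaType) {
        case RSTRING:
        case USTRING:
            tuple.setString(attrName, value.toString());
            break;
        case INT32:
            tuple.setInt(attrName, ((Number) value).intValue());
            break;
        case INT64:
            tuple.setLong(attrName, ((Number) value).longValue());
            break;
        case FLOAT64:
            tuple.setDouble(attrName, ((Number) value).doubleValue());
            break;
        case BLOB:
            tuple.setBlob(attrName, ValueFactory.newBlob((byte[]) value));
            break;
        default:
            tuple.setObject(attrName, value);
            break;
    }
}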

Aggregations

OutputTuple (com.ibm.streams.operator.OutputTuple): 9
SPLStream (com.ibm.streamsx.topology.spl.SPLStream): 3
StreamSchema (com.ibm.streams.operator.StreamSchema): 2
Message (ca.uhn.hl7v2.model.Message): 1
Tuple (com.ibm.streams.operator.Tuple): 1
TupleType (com.ibm.streams.operator.meta.TupleType): 1
RString (com.ibm.streams.operator.types.RString): 1
HapiMessageSupplier (com.ibm.streamsx.health.hapi.internal.HapiMessageSupplier): 1
ObxToSplMapper (com.ibm.streamsx.health.hapi.mapper.ObxToSplMapper): 1
Observation (com.ibm.streamsx.health.hapi.model.Observation): 1
Topology (com.ibm.streamsx.topology.Topology): 1
FunctionalHelper.getLogicObject (com.ibm.streamsx.topology.internal.functional.FunctionalHelper.getLogicObject): 1
BeatDetectAndClassifyResult (eplimited.osea.classification.BeatDetectionAndClassification.BeatDetectAndClassifyResult): 1
BufferedReader (java.io.BufferedReader): 1
File (java.io.File): 1
FileInputStream (java.io.FileInputStream): 1
InputStreamReader (java.io.InputStreamReader): 1
Random (java.util.Random): 1