Use of com.ibm.streams.operator.StreamSchema in project streamsx.topology by IBMStreams.
The class Topology, method subscribe.
/**
* Declare a stream that is a subscription to {@code topic}.
* A topic is published using {@link TStream#publish(String)}.
* Subscribers are matched to published streams when the {@code topic}
* is an exact match and the type of the stream ({@code T},
* {@code tupleTypeClass}) is an exact match.
* <BR>
* Publish-subscribe is a many-to-many relationship:
* multiple streams from multiple applications may
* be published on the same topic and type, and multiple
* subscribers may subscribe to a topic and type.
* <BR>
* A subscription will match all publishers using the
* same topic and tuple type. Tuples on the published
* streams will appear on the returned stream
* as a single stream.
* <BR>
* The subscription is dynamic: the returned stream
* will subscribe to a matching stream published by
* a newly submitted application (a job), and a
* subscription stops when a running job is cancelled.
* <P>
* Publish-subscribe only works when the topology is
* submitted to a {@link com.ibm.streamsx.topology.context.StreamsContext.Type#DISTRIBUTED}
* context. This allows different applications (or
* even different parts of the same application) to
* communicate using published streams.
* </P>
* <P>
* If {@code tupleTypeClass} is {@code JSONObject.class} then the
* subscription is the generic IBM Streams schema for JSON
* ({@link JSONSchemas#JSON}). Streams of type {@code JSONObject}
* are always published and subscribed using the generic schema
* to allow interchange between applications implemented in
* different languages.
* </P>
* @param topic Topic to subscribe to.
* @param tupleTypeClass Type to subscribe to.
* @return A stream that will contain tuples from matching publishers.
*
* @see TStream#publish(String)
* @see SPLStreams#subscribe(TopologyElement, String, com.ibm.streams.operator.StreamSchema)
*/
public <T> TStream<T> subscribe(String topic, Class<T> tupleTypeClass) {
    checkTopicFilter(topic);

    if (JSONObject.class.equals(tupleTypeClass)) {
        @SuppressWarnings("unchecked")
        TStream<T> json = (TStream<T>) SPLStreams.subscribe(this, topic, JSONSchemas.JSON).toJSON();
        return json;
    }

    StreamSchema mappingSchema = Schemas.getSPLMappingSchema(tupleTypeClass);
    SPLStream splImport;
    if (Schemas.usesDirectSchema(tupleTypeClass)) {
        // Subscribe as an SPL stream.
        splImport = SPLStreams.subscribe(this, topic, mappingSchema);
    } else {
        Map<String, Object> params = new HashMap<>();
        params.put("topic", topic);
        params.put("class", tupleTypeClass.getName());
        params.put("streamType", mappingSchema);
        splImport = SPL.invokeSource(this,
                "com.ibm.streamsx.topology.topic::SubscribeJava", params, mappingSchema);
    }

    return new StreamImpl<T>(this, splImport.output(), tupleTypeClass);
}
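For context, a minimal sketch of how a publisher and a subscriber pair up through this method when both jobs are submitted to a DISTRIBUTED context. The topic name, tuple values, and application names are illustrative only:

// Hypothetical publisher job: publishes String tuples on topic "sensors/engine".
Topology pubTopo = new Topology("Publisher");
pubTopo.strings("t=98.6", "t=99.1").publish("sensors/engine");

// Hypothetical subscriber job: matches on the same topic and tuple type (String),
// so tuples from every matching publisher job arrive as a single stream.
Topology subTopo = new Topology("Subscriber");
TStream<String> subscribed = subTopo.subscribe("sensors/engine", String.class);
subscribed.print();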
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class AbstractKafkaProducerOperator, method initialize.
/**
* Initialize this operator. Called once before any tuples are processed.
*
* @param context
* OperatorContext for this operator.
* @throws Exception
* Operator failure, will cause the enclosing PE to terminate.
*/
@Override
public synchronized void initialize(OperatorContext context) throws Exception {
    super.initialize(context);
    logger.trace("Operator " + context.getName() + " initializing in PE: " //$NON-NLS-1$ //$NON-NLS-2$
            + context.getPE().getPEId() + " in Job: " + context.getPE().getJobId()); //$NON-NLS-3$

    StreamSchema inputSchema = context.getStreamingInputs().get(0).getStreamSchema();

    // check for key attribute and get type
    Attribute keyAttribute = null;
    if (keyAttr != null && keyAttr.getAttribute() != null) {
        keyAttribute = keyAttr.getAttribute();
    } else {
        keyAttribute = inputSchema.getAttribute(DEFAULT_KEY_ATTR_NAME);
    }
    if (keyAttribute != null) {
        keyType = keyAttribute.getType().getObjectType();
        keyAttributeName = keyAttribute.getName();
    }

    // check for partition attribute
    Attribute partitionAttribute = null;
    if (partitionAttr != null && partitionAttr.getAttribute() != null) {
        partitionAttribute = partitionAttr.getAttribute();
    } else {
        partitionAttribute = inputSchema.getAttribute(DEFAULT_PARTITION_ATTR_NAME);
    }
    partitionAttributeName = partitionAttribute != null ? partitionAttribute.getName() : null;

    // check for timestamp attribute
    Attribute timestampAttribute = null;
    if (timestampAttr != null && timestampAttr.getAttribute() != null) {
        timestampAttribute = timestampAttr.getAttribute();
    } else {
        timestampAttribute = inputSchema.getAttribute(DEFAULT_TIMESTAMP_ATTR_NAME);
    }
    timestampAttributeName = timestampAttribute != null ? timestampAttribute.getName() : null;

    // get message type
    messageType = messageAttr.getAttribute().getType().getObjectType();

    crContext = context.getOptionalContext(ConsistentRegionContext.class);
    if (crContext != null) {
        isResetting = new AtomicBoolean(context.getPE().getRelaunchCount() > 0);
    }

    initProducer();
    registerForDataGovernance(context, topics);

    logger.info(">>> Operator initialized! <<<"); //$NON-NLS-1$
}
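The key, partition, and timestamp lookups above all follow the same resolve-or-default pattern: prefer an explicitly configured attribute parameter, otherwise fall back to a conventionally named attribute in the input schema. A sketch of that pattern factored into a standalone helper; the helper and the exact generic type of the configured parameter are assumptions for illustration, as the operator itself inlines this logic:

// Illustrative helper (not part of the operator): resolve an attribute from an
// explicitly configured TupleAttribute parameter, falling back to a default
// attribute name in the input schema; returns null when neither is present.
private static Attribute resolveAttribute(TupleAttribute<Tuple, ?> configured,
        StreamSchema schema, String defaultName) {
    if (configured != null && configured.getAttribute() != null) {
        return configured.getAttribute();
    }
    return schema.getAttribute(defaultName);
}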
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class KafkaOperatorsBlobTypeTest, method kafkaBlobTypeTest.
@Test
public void kafkaBlobTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.BLOB_SCHEMA;

    // create the producer (produces tuples after a short delay)
    TStream<Blob> srcStream = topo.strings(Constants.STRING_DATA)
            .transform(s -> ValueFactory.newBlob(s.getBytes()))
            .modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(
            consumerStream.convert(t -> new String(t.getBlob("message").getData())));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, Constants.STRING_DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
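The test relies on a Converter class defined elsewhere in the test project; SPLStreams.convertStream expects a BiFunction that copies each Java tuple into the SPL output tuple. A plausible shape for the blob case, assuming the BLOB_SCHEMA carries a single "message" attribute (as the getBlob("message") call above suggests). The Double and Float tests that follow would use analogous converters with setDouble and setFloat:

// Plausible sketch of the Converter used above (the actual implementation lives
// in the test project): copy each Blob into the "message" attribute of the tuple.
public static class Converter implements BiFunction<Blob, OutputTuple, OutputTuple> {
    private static final long serialVersionUID = 1L;

    @Override
    public OutputTuple apply(Blob blob, OutputTuple outTuple) {
        outTuple.setBlob("message", blob);
        return outTuple;
    }
}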
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class KafkaOperatorsDoubleTypeTest, method kafkaDoubleTypeTest.
@Test
public void kafkaDoubleTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.DOUBLE_SCHEMA;

    // create the producer (produces tuples after a short delay)
    TStream<Double> srcStream = topo.strings(DATA)
            .transform(s -> Double.valueOf(s))
            .modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(
            consumerStream.convert(t -> String.valueOf(t.getDouble("message"))));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class KafkaOperatorsFloatTypeTest, method kafkaFloatTypeTest.
@Test
public void kafkaFloatTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.FLOAT_SCHEMA;

    // create the producer (produces tuples after a short delay)
    TStream<Float> srcStream = topo.strings(DATA)
            .transform(s -> Float.valueOf(s))
            .modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // create the consumer
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(
            consumerStream.convert(t -> String.valueOf(t.getFloat("message"))));

    // test the output of the consumer
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // check the results
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}