Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class AbstractKafkaProducerOperator, method initialize.
/**
 * Initialize this operator. Called once before any tuples are processed.
 *
 * @param context
 *            OperatorContext for this operator.
 * @throws Exception
 *             Operator failure, will cause the enclosing PE to terminate.
 */
@Override
public synchronized void initialize(OperatorContext context) throws Exception {
    super.initialize(context);
    logger.trace("Operator " + context.getName() + " initializing in PE: " + context.getPE().getPEId() + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
    StreamSchema inputSchema = context.getStreamingInputs().get(0).getStreamSchema();

    // Resolve the key attribute: use the keyAttr parameter if set, otherwise
    // fall back to the default key attribute name in the input schema.
    Attribute keyAttribute = null;
    if (keyAttr != null && keyAttr.getAttribute() != null) {
        keyAttribute = keyAttr.getAttribute();
    } else {
        keyAttribute = inputSchema.getAttribute(DEFAULT_KEY_ATTR_NAME);
    }
    if (keyAttribute != null) {
        keyType = keyAttribute.getType().getObjectType();
        keyAttributeName = keyAttribute.getName();
    }

    // Resolve the optional partition attribute the same way.
    Attribute partitionAttribute = null;
    if (partitionAttr != null && partitionAttr.getAttribute() != null) {
        partitionAttribute = partitionAttr.getAttribute();
    } else {
        partitionAttribute = inputSchema.getAttribute(DEFAULT_PARTITION_ATTR_NAME);
    }
    partitionAttributeName = partitionAttribute != null ? partitionAttribute.getName() : null;

    // Resolve the optional timestamp attribute.
    Attribute timestampAttribute = null;
    if (timestampAttr != null && timestampAttr.getAttribute() != null) {
        timestampAttribute = timestampAttr.getAttribute();
    } else {
        timestampAttribute = inputSchema.getAttribute(DEFAULT_TIMESTAMP_ATTR_NAME);
    }
    timestampAttributeName = timestampAttribute != null ? timestampAttribute.getName() : null;

    // The message attribute is mandatory; capture its Java object type.
    messageType = messageAttr.getAttribute().getType().getObjectType();

    // In a consistent region, a relaunch count > 0 means the operator starts in a reset state.
    crContext = context.getOptionalContext(ConsistentRegionContext.class);
    if (crContext != null) {
        isResetting = new AtomicBoolean(context.getPE().getRelaunchCount() > 0);
    }

    initProducer();
    registerForDataGovernance(context, topics);
    logger.info(">>> Operator initialized! <<<"); //$NON-NLS-1$
}
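For context, keyAttr, partitionAttr, and timestampAttr above are optional attribute parameters that initialize() resolves against the input schema. A minimal sketch of how such TupleAttribute parameters might be declared follows; the parameter names, descriptions, and optionality are assumptions and not confirmed by this excerpt.

import com.ibm.streams.operator.Tuple;
import com.ibm.streams.operator.TupleAttribute;
import com.ibm.streams.operator.model.Parameter;

// Hypothetical sketch only: parameter declarations as the initialize() logic
// above assumes them; the real AbstractKafkaProducerOperator may differ.
public abstract class ProducerParamsSketch {

    protected TupleAttribute<Tuple, ?> keyAttr;       // Kafka record key, optional
    protected TupleAttribute<Tuple, ?> partitionAttr; // target partition, optional
    protected TupleAttribute<Tuple, ?> timestampAttr; // record timestamp, optional

    @Parameter(optional = true, description = "Input attribute that holds the record key.")
    public void setKeyAttr(TupleAttribute<Tuple, ?> keyAttr) {
        this.keyAttr = keyAttr;
    }

    @Parameter(optional = true, description = "Input attribute that holds the target partition.")
    public void setPartitionAttr(TupleAttribute<Tuple, ?> partitionAttr) {
        this.partitionAttr = partitionAttr;
    }

    @Parameter(optional = true, description = "Input attribute that holds the record timestamp.")
    public void setTimestampAttr(TupleAttribute<Tuple, ?> timestampAttr) {
        this.timestampAttr = timestampAttr;
    }
}

When a parameter is unset, the fallback to DEFAULT_KEY_ATTR_NAME (and its partition/timestamp counterparts) lets the operator pick up conventionally named attributes without any explicit configuration.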
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class KafkaOperatorsBlobTypeTest, method kafkaBlobTypeTest.
@Test
public void kafkaBlobTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.BLOB_SCHEMA;

    // Create the producer (produces tuples after a short delay).
    TStream<Blob> srcStream = topo.strings(Constants.STRING_DATA)
            .transform(s -> ValueFactory.newBlob(s.getBytes()))
            .modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // Create the consumer and convert the consumed blobs back to strings.
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(
            consumerStream.convert(t -> new String(t.getBlob("message").getData())));

    // Test the output of the consumer.
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, Constants.STRING_DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // Check the results.
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
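The Converter passed to SPLStreams.convertStream maps each Java value onto an output tuple of the target schema. A minimal sketch of what the blob test's converter might look like, assuming the schema's message attribute is named "message" (the inner Converter class itself is not shown in this excerpt):

import com.ibm.streams.operator.OutputTuple;
import com.ibm.streams.operator.types.Blob;
import com.ibm.streamsx.topology.function.BiFunction;

// Hypothetical sketch of the Converter used above: copies each Blob into the
// "message" attribute of the output tuple. The attribute name is inferred
// from the consumer side of the test.
public class Converter implements BiFunction<Blob, OutputTuple, OutputTuple> {
    private static final long serialVersionUID = 1L;

    @Override
    public OutputTuple apply(Blob blob, OutputTuple outTuple) {
        outTuple.setBlob("message", blob);
        return outTuple;
    }
}

The topology toolkit's own BiFunction is used rather than java.util.function.BiFunction because topology functions must be serializable for distributed submission.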
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class KafkaOperatorsDoubleTypeTest, method kafkaDoubleTypeTest.
@Test
public void kafkaDoubleTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.DOUBLE_SCHEMA;

    // Create the producer (produces tuples after a short delay).
    TStream<Double> srcStream = topo.strings(DATA)
            .transform(s -> Double.valueOf(s))
            .modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // Create the consumer and convert the consumed values back to strings.
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(
            consumerStream.convert(t -> String.valueOf(t.getDouble("message"))));

    // Test the output of the consumer.
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // Check the results.
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
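Each of these type tests pairs a single "message" attribute with the SPL type under test. A minimal sketch of how KafkaSPLStreamsUtils might define such schema constants, assuming one attribute per schema (the actual constant definitions are not part of this excerpt):

import com.ibm.streams.operator.StreamSchema;
import com.ibm.streams.operator.Type;

// Hypothetical sketch: one-attribute schemas for the type round-trip tests.
// SPL float64 maps to Java double, float32 to float, int64 to long.
public interface SchemaSketch {
    StreamSchema BLOB_SCHEMA   = Type.Factory.getStreamSchema("tuple<blob message>");
    StreamSchema DOUBLE_SCHEMA = Type.Factory.getStreamSchema("tuple<float64 message>");
    StreamSchema FLOAT_SCHEMA  = Type.Factory.getStreamSchema("tuple<float32 message>");
    StreamSchema LONG_SCHEMA   = Type.Factory.getStreamSchema("tuple<int64 message>");
}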
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class KafkaOperatorsFloatTypeTest, method kafkaFloatTypeTest.
@Test
public void kafkaFloatTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.FLOAT_SCHEMA;

    // Create the producer (produces tuples after a short delay).
    TStream<Float> srcStream = topo.strings(DATA)
            .transform(s -> Float.valueOf(s))
            .modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // Create the consumer and convert the consumed values back to strings.
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(
            consumerStream.convert(t -> String.valueOf(t.getFloat("message"))));

    // Test the output of the consumer.
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // Check the results.
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
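getKafkaParams() supplies the same SPL parameter map to both the producer and the consumer invocations in every test. A minimal sketch of such a helper follows, assuming the topic and propertiesFile operator parameters documented for the streamsx.kafka toolkit; the constant names and values here are placeholders, not taken from this excerpt.

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: shared invocation parameters for the Kafka operators.
// "topic" and "propertiesFile" are standard streamsx.kafka SPL parameters;
// TEST_TOPIC and PROPERTIES_PATH are placeholder constants.
public class KafkaParamsSketch {
    private static final String TEST_TOPIC = "test";
    private static final String PROPERTIES_PATH = "etc/kafka.properties";

    public static Map<String, Object> getKafkaParams() {
        Map<String, Object> params = new HashMap<>();
        params.put("topic", TEST_TOPIC);
        params.put("propertiesFile", PROPERTIES_PATH);
        return params;
    }
}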
Use of com.ibm.streams.operator.StreamSchema in project streamsx.kafka by IBMStreams.
The class KafkaOperatorsLongTypeTest, method kafkaLongTypeTest.
@Test
public void kafkaLongTypeTest() throws Exception {
    Topology topo = getTopology();
    StreamSchema schema = KafkaSPLStreamsUtils.LONG_SCHEMA;

    // Create the producer (produces tuples after a short delay).
    TStream<Long> srcStream = topo.strings(DATA)
            .transform(s -> Long.valueOf(s))
            .modify(new Delay<>(5000));
    SPLStream splSrcStream = SPLStreams.convertStream(srcStream, new Converter(), schema);
    SPL.invokeSink(Constants.KafkaProducerOp, splSrcStream, getKafkaParams());

    // Create the consumer and convert the consumed values back to strings.
    SPLStream consumerStream = SPL.invokeSource(topo, Constants.KafkaConsumerOp, getKafkaParams(), schema);
    SPLStream msgStream = SPLStreams.stringToSPLStream(
            consumerStream.convert(t -> String.valueOf(t.getLong("message"))));

    // Test the output of the consumer.
    StreamsContext<?> context = StreamsContextFactory.getStreamsContext(Type.DISTRIBUTED_TESTER);
    Tester tester = topo.getTester();
    Condition<List<String>> condition = KafkaSPLStreamsUtils.stringContentsUnordered(tester, msgStream, DATA);
    tester.complete(context, new HashMap<>(), condition, 30, TimeUnit.SECONDS);

    // Check the results.
    Assert.assertTrue(condition.getResult().size() > 0);
    Assert.assertTrue(condition.getResult().toString(), condition.valid());
}
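All four tests share the same completion pattern: build an unordered-contents Condition over the consumed stream, then let the tester run the topology until the condition holds or the 30-second timeout expires. A minimal sketch of how KafkaSPLStreamsUtils.stringContentsUnordered might delegate to the Tester API, assuming the stream was produced by SPLStreams.stringToSPLStream so its tuples convert cleanly to Java strings (the actual helper is not shown in this excerpt):

import java.util.List;
import com.ibm.streamsx.topology.TStream;
import com.ibm.streamsx.topology.spl.SPLStream;
import com.ibm.streamsx.topology.tester.Condition;
import com.ibm.streamsx.topology.tester.Tester;

// Hypothetical sketch: an unordered-contents condition over an SPL stream.
public class TestUtilSketch {
    public static Condition<List<String>> stringContentsUnordered(
            Tester tester, SPLStream stream, String... expected) {
        // Convert the SPL tuples to their string form, then assert that the
        // expected values all arrive, in any order.
        TStream<String> strings = stream.toStringStream();
        return tester.stringContentsUnordered(strings, expected);
    }
}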