Use of com.ibm.streamsx.kafka.properties.KafkaOperatorProperties in project streamsx.kafka by IBMStreams.
The class AbstractKafkaOperator, method initialize():
@Override
public synchronized void initialize(OperatorContext context) throws Exception {
    super.initialize(context);
    // load the Kafka properties
    kafkaProperties = new KafkaOperatorProperties();
    loadProperties();
    // set the client ID property if the clientId parameter is specified
    if (clientId != null && !clientId.isEmpty()) {
        kafkaProperties.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
    }
    if (userLib == null) {
        userLib = new String[] { context.getPE().getApplicationDirectory() + DEFAULT_USER_LIB_DIR };
    } else {
        // convert all of the paths to absolute paths (if necessary)
        List<String> absLibPaths = new ArrayList<String>();
        for (String libPath : userLib) {
            absLibPaths.add(convertToAbsolutePath(libPath).getAbsolutePath());
        }
        userLib = absLibPaths.toArray(new String[0]);
    }
    logger.info("Loading user libraries: " + Arrays.asList(userLib)); //$NON-NLS-1$
    context.addClassLibraries(userLib);
}
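For comparison, here is a minimal standalone sketch (not from the operator source) of the same override pattern: load defaults from a properties file, then let an explicitly supplied clientId parameter win over any client.id in the file. Plain java.util.Properties stands in for KafkaOperatorProperties, and the file-based loading and helper name are assumptions for illustration.

import java.io.FileReader;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

public class ClientIdOverrideSketch {
    // hypothetical helper, not part of streamsx.kafka
    public static Properties load(String propertiesFile, String clientId) throws Exception {
        Properties kafkaProperties = new Properties(); // stand-in for KafkaOperatorProperties
        try (FileReader reader = new FileReader(propertiesFile)) {
            kafkaProperties.load(reader);
        }
        // an explicitly given clientId parameter overrides any client.id from the file
        if (clientId != null && !clientId.isEmpty()) {
            kafkaProperties.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        }
        return kafkaProperties;
    }
}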
Use of com.ibm.streamsx.kafka.properties.KafkaOperatorProperties in project streamsx.kafka by IBMStreams.
The class TransactionalKafkaProducerClient, method getConsumerProperties():
private KafkaOperatorProperties getConsumerProperties() throws Exception {
    KafkaOperatorProperties consumerProps = new KafkaOperatorProperties();
    // copy only those producer properties whose keys are valid for both the producer and the consumer config
    Set<String> consumerConfigNames = ConsumerConfig.configNames();
    Set<String> producerConfigNames = ProducerConfig.configNames();
    for (Entry<?, ?> producerProp : this.kafkaProperties.entrySet()) {
        if (producerConfigNames.contains(producerProp.getKey()) && consumerConfigNames.contains(producerProp.getKey())) {
            consumerProps.put(producerProp.getKey(), producerProp.getValue());
        }
    }
    logger.debug("inferred consumer properties: " + consumerProps);
    consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, getRandomId(CONSUMER_ID_PREFIX));
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, getRandomId(GROUP_ID_PREFIX));
    // The key and value deserializers must not be inferred from the producer's serializers because
    // that fails for custom serializers. Use ByteArrayDeserializer instead; key and value are not
    // evaluated when reading from the control topic.
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // isolation.level=read_committed is required in case the operator dies between the send to the
    // control topic and commitTransaction()
    consumerProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    logger.debug("final consumer properties: " + consumerProps);
    return consumerProps;
}
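ConsumerConfig.configNames() and ProducerConfig.configNames() are public kafka-clients APIs, so the filtering idea above can be reproduced outside the operator. A self-contained sketch under that assumption (class and method names are made up for illustration):

import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class SharedConfigSketch {
    // derive consumer properties from producer properties by keeping only the keys
    // that are valid in both configs, then pin the consumer-specific settings
    public static Properties deriveConsumerProps(Properties producerProps) {
        Set<String> consumerNames = ConsumerConfig.configNames();
        Set<String> producerNames = ProducerConfig.configNames();
        Properties consumerProps = new Properties();
        producerProps.stringPropertyNames().stream()
                .filter(k -> producerNames.contains(k) && consumerNames.contains(k))
                .forEach(k -> consumerProps.setProperty(k, producerProps.getProperty(k)));
        // read_committed hides records of transactions that were aborted or never completed
        consumerProps.setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        return consumerProps;
    }
}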
Use of com.ibm.streamsx.kafka.properties.KafkaOperatorProperties in project streamsx.kafka by IBMStreams.
The class AbstractKafkaConsumerOperator, method initialize():
@Override
public synchronized void initialize(OperatorContext context) throws Exception {
    // Must call super.initialize(context) to correctly set up an operator.
    super.initialize(context);
    logger.trace("Operator " + context.getName() + " initializing in PE: " + context.getPE().getPEId() + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
    shutdown = new AtomicBoolean(false);
    gson = new Gson();
    StreamSchema outputSchema = context.getStreamingOutputs().get(0).getStreamSchema();
    hasOutputKey = outputSchema.getAttribute(outputKeyAttrName) != null;
    hasOutputTopic = outputSchema.getAttribute(outputTopicAttrName) != null;
    hasOutputTimetamp = outputSchema.getAttribute(outputMessageTimestampAttrName) != null;
    hasOutputPartition = outputSchema.getAttribute(outputPartitionAttrName) != null;
    hasOutputOffset = outputSchema.getAttribute(outputOffsetAttrName) != null;
    // default to String.class for the key type when the output schema has no key attribute
    Class<?> keyClass = hasOutputKey ? getAttributeType(context.getStreamingOutputs().get(0), outputKeyAttrName) : String.class;
    Class<?> valueClass = getAttributeType(context.getStreamingOutputs().get(0), outputMessageAttrName);
    KafkaOperatorProperties kafkaProperties = getKafkaProperties();
    logger.debug("kafkaProperties: " + kafkaProperties); //$NON-NLS-1$
    // set the group ID property if the groupId parameter is specified
    if (groupId != null && !groupId.isEmpty()) {
        kafkaProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    }
    consumer = new KafkaConsumerClient.KafkaConsumerClientBuilder()
            .setKafkaProperties(kafkaProperties)
            .setKeyClass(keyClass)
            .setValueClass(valueClass)
            .setOperatorContext(context)
            .build();
    // If an exception occurred during init, throw it!
    if (consumer.getInitializationException() != null) {
        Exception e = consumer.getInitializationException();
        e.printStackTrace();
        logger.error(e.getLocalizedMessage(), e);
        throw e;
    }
    // no input port is used, so the topics must be defined
    if (context.getStreamingInputs().size() == 0) {
        if (topics != null) {
            registerForDataGovernance(context, topics);
            if (startPosition == StartPosition.Time) {
                consumer.subscribeToTopicsWithTimestamp(topics, partitions, startTime);
            } else if (startPosition == StartPosition.Offset) {
                consumer.subscribeToTopicsWithOffsets(topics, partitions, startOffsets);
            } else {
                consumer.subscribeToTopics(topics, partitions, startPosition);
            }
        }
    }
    crContext = context.getOptionalContext(ConsistentRegionContext.class);
    if (crContext != null && context.getPE().getRelaunchCount() > 0) {
        resettingLatch = new CountDownLatch(1);
    }
    processThread = getOperatorContext().getThreadFactory().newThread(new Runnable() {

        @Override
        public void run() {
            try {
                produceTuples();
            } catch (Exception e) {
                Logger.getLogger(this.getClass()).error("Operator error", e); //$NON-NLS-1$
                // Rethrow so the PE fails and restarts; otherwise this thread would terminate
                // while the PE still reports healthy without actually processing.
                throw new RuntimeException(e);
            }
        }
    });
    processThread.setDaemon(false);
}
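As background on StartPosition.Time: in plain kafka-clients terms, starting at a timestamp means resolving a per-partition offset with offsetsForTimes() and seeking there. A hedged sketch of that mechanism, independent of the Streams operator API (the class and helper names are illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class SeekToTimestampSketch {
    // assign all partitions of the topic and seek each to the first offset whose
    // record timestamp is >= startTimeMillis
    public static void seekToTime(KafkaConsumer<?, ?> consumer, String topic, long startTimeMillis) {
        Map<TopicPartition, Long> request = new HashMap<>();
        for (PartitionInfo p : consumer.partitionsFor(topic)) {
            request.put(new TopicPartition(topic, p.partition()), startTimeMillis);
        }
        consumer.assign(request.keySet());
        Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(request);
        offsets.forEach((tp, oat) -> {
            if (oat != null) { // null when no record at or after the timestamp exists
                consumer.seek(tp, oat.offset());
            }
        });
    }
}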
Use of com.ibm.streamsx.kafka.properties.KafkaOperatorProperties in project streamsx.kafka by IBMStreams.
The class AbstractKafkaProducerOperator, method initProducer():
private void initProducer() throws Exception {
    // configure producer
    KafkaOperatorProperties props = getKafkaProperties();
    if (crContext == null) {
        logger.info("Creating KafkaProducerClient...");
        producer = new KafkaProducerClient(getOperatorContext(), keyType, messageType, props);
    } else {
        switch (consistentRegionPolicy) {
        case AtLeastOnce:
            logger.info("Creating AtLeastOnceKafkaProducerClient...");
            producer = new AtLeastOnceKafkaProducerClient(getOperatorContext(), keyType, messageType, props);
            break;
        case Transactional:
            logger.info("Creating TransactionalKafkaProducerClient...");
            producer = new TransactionalKafkaProducerClient(getOperatorContext(), keyType, messageType, props, /*lazyTransactionBegin*/ true);
            break;
        default:
            throw new RuntimeException("Unrecognized ConsistentRegionPolicy: " + consistentRegionPolicy);
        }
    }
}
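The Transactional policy builds on the transactional producer protocol of kafka-clients. A minimal sketch of that underlying API; broker address, transactional.id, topic, and the key/value strings are illustrative values only:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-tx-id");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();  // registers the transactional.id with the coordinator
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("example-topic", "key", "value"));
            producer.commitTransaction(); // all sends in the transaction become visible atomically
        }
    }
}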