Use of com.ibm.streamsx.kafka.KafkaOperatorException in project streamsx.kafka by IBMStreams.
The example on this page is the initialize method of the class AbstractKafkaProducerOperator.
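Before the full method, here is a minimal, self-contained sketch of the guard pattern it uses twice: reject an attribute whose meta type is not in a supported list by throwing KafkaOperatorException. The MetaType values, the supported set, and the exception are stubbed so the sketch compiles on its own; only the shape of the check is taken from the method below.

import java.util.Arrays;
import java.util.List;

// Standalone sketch of the type guard in initialize(). MetaType and
// KafkaOperatorException are stubbed here; the real types come from the
// Streams operator API and from com.ibm.streamsx.kafka.
public class TypeGuardSketch {

    enum MetaType { RSTRING, USTRING, BLOB, INT32 }

    static class KafkaOperatorException extends Exception {
        KafkaOperatorException(String msg) { super(msg); }
    }

    // assumed supported set; the real operator defines SUPPORTED_ATTR_TYPES itself
    static final MetaType[] SUPPORTED_ATTR_TYPES = { MetaType.RSTRING, MetaType.USTRING, MetaType.BLOB };

    static void checkSupported(MetaType type, String attrName) throws KafkaOperatorException {
        final List<MetaType> supportedAttrTypes = Arrays.asList(SUPPORTED_ATTR_TYPES);
        if (!supportedAttrTypes.contains(type)) {
            // the real code builds a localized message via Messages.getString("UNSUPPORTED_ATTR_TYPE", ...)
            throw new KafkaOperatorException("attribute " + attrName + " has unsupported type " + type);
        }
    }

    public static void main(String[] args) throws Exception {
        checkSupported(MetaType.RSTRING, "key");    // passes
        checkSupported(MetaType.INT32, "message");  // throws KafkaOperatorException
    }
}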
/**
 * Initialize this operator. Called once before any tuples are processed.
 *
 * @param context
 *            OperatorContext for this operator.
 * @throws Exception
 *             Operator failure, will cause the enclosing PE to terminate.
 */
@Override
public synchronized void initialize(OperatorContext context) throws Exception {
    super.initialize(context);
    logger.trace("Operator " + context.getName() + " initializing in PE: " + context.getPE().getPEId() + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
    StreamSchema inputSchema = context.getStreamingInputs().get(0).getStreamSchema();

    // check for key attribute and get type
    Attribute keyAttribute = null;
    if (keyAttr != null && keyAttr.getAttribute() != null) {
        keyAttribute = keyAttr.getAttribute();
    } else {
        keyAttribute = inputSchema.getAttribute(DEFAULT_KEY_ATTR_NAME);
    }
    final List<MetaType> supportedAttrTypes = Arrays.asList(SUPPORTED_ATTR_TYPES);
    if (keyAttribute != null) {
        // fail initialization when the key attribute has an unsupported type
        if (!supportedAttrTypes.contains(keyAttribute.getType().getMetaType())) {
            final String msg = Messages.getString("UNSUPPORTED_ATTR_TYPE", context.getKind(), keyAttribute.getType().getLanguageType(), keyAttribute.getName());
            logger.error(msg);
            throw new KafkaOperatorException(msg);
        }
        keyType = keyAttribute.getType().getObjectType();
        keyAttributeIndex = keyAttribute.getIndex();
    } else {
        keyAttributeIndex = -1;
    }
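    // NOTE: the same resolution pattern repeats below for the partition,
    // timestamp, and topic attributes: prefer the attribute supplied via the
    // corresponding operator parameter, fall back to a default attribute name
    // in the input schema, and store an index of -1 when the attribute is absent.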
    // check for partition attribute
    Attribute partitionAttribute = null;
    if (partitionAttr != null && partitionAttr.getAttribute() != null) {
        partitionAttribute = partitionAttr.getAttribute();
    } else {
        partitionAttribute = inputSchema.getAttribute(DEFAULT_PARTITION_ATTR_NAME);
    }
    partitionAttributeIndex = partitionAttribute != null ? partitionAttribute.getIndex() : -1;
    // check for timestamp attribute
    Attribute timestampAttribute = null;
    if (timestampAttr != null && timestampAttr.getAttribute() != null) {
        timestampAttribute = timestampAttr.getAttribute();
    } else {
        timestampAttribute = inputSchema.getAttribute(DEFAULT_TIMESTAMP_ATTR_NAME);
    }
    timestampAttributeIndex = timestampAttribute != null ? timestampAttribute.getIndex() : -1;
    // check for topic attribute
    Attribute topicAttribute = null;
    if (topicAttr != null && topicAttr.getAttribute() != null) {
        topicAttribute = topicAttr.getAttribute();
    } else {
        topicAttribute = inputSchema.getAttribute(DEFAULT_TOPIC_ATTR_NAME);
    }
    topicAttributeIndex = topicAttribute != null ? topicAttribute.getIndex() : -1;
    // the message attribute is mandatory; check its type
    Attribute messageAttribute = messageAttr.getAttribute();
    if (!supportedAttrTypes.contains(messageAttribute.getType().getMetaType())) {
        final String msg = Messages.getString("UNSUPPORTED_ATTR_TYPE", context.getKind(), messageAttribute.getType().getLanguageType(), messageAttribute.getName());
        logger.error(msg);
        throw new KafkaOperatorException(msg);
    }
    messageType = messageAttribute.getType().getObjectType();
    crContext = context.getOptionalContext(ConsistentRegionContext.class);
    // isResetting can always be false when not in a consistent region.
    // When not in a consistent region, reset happens _before_ allPortsReady(), so that
    // tuple processing cannot conflict with RESET processing, for which this flag is used.
    isResetting = new AtomicBoolean(crContext != null && context.getPE().getRelaunchCount() > 0);
    if (getOperatorContext().getNumberOfStreamingOutputs() > 0) {
        this.errorPortSubmitter = new OutputPortSubmitter(context, O_PORT_DEFAULT_QUEUE_CAPACITY, O_PORT_QUEUE_OFFER_TIMEOUT_MS, outputErrorsOnly);
    }
    initProducer();
    final boolean registerAsInput = false;
    registerForDataGovernance(context, topics, registerAsInput);
    logger.debug(">>> Operator initialized <<<"); //$NON-NLS-1$
}
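The tuple-processing side of the operator is not shown on this page. For illustration only, a hypothetical helper (not part of streamsx.kafka) could consume the indices resolved above to build the Kafka ProducerRecord for each tuple; Tuple and ProducerRecord are the real Streams and Kafka client types, while the helper, its parameter names, and the null-defaulting policy are assumptions.

import com.ibm.streams.operator.Tuple;
import org.apache.kafka.clients.producer.ProducerRecord;

// Hypothetical sketch: map a tuple to a ProducerRecord using the attribute
// indices resolved in initialize(). An index of -1 means "attribute not
// present", matching the convention above; absent optional fields are passed
// as null so that Kafka applies its own partitioning and timestamping.
final class RecordBuilderSketch {

    static ProducerRecord<Object, Object> toRecord(Tuple tuple, String defaultTopic,
            int topicIdx, int partitionIdx, int timestampIdx, int keyIdx, int messageIdx) {
        final String topic = topicIdx >= 0 ? tuple.getString(topicIdx) : defaultTopic;
        final Integer partition = partitionIdx >= 0 ? tuple.getInt(partitionIdx) : null;
        final Long timestamp = timestampIdx >= 0 ? tuple.getLong(timestampIdx) : null;
        final Object key = keyIdx >= 0 ? tuple.getObject(keyIdx) : null;
        return new ProducerRecord<>(topic, partition, timestamp, key, tuple.getObject(messageIdx));
    }
}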