Use of com.ibm.streamsx.kafka.KafkaClientInitializationException in project streamsx.kafka by IBMStreams.
The class AbstractKafkaConsumerOperator, method reset().
@Override
public void reset(Checkpoint checkpoint) throws Exception {
    final int attempt = crContext == null ? -1 : crContext.getResetAttempt();
    final long sequenceId = checkpoint.getSequenceId();
    logger.log(DEBUG_LEVEL, MsgFormatter.format(">>> RESET (ckpt id/attempt={0,number,#}/{1})", sequenceId, (crContext == null ? "-" : "" + attempt)));
    final long before = System.currentTimeMillis();
    try {
        final ObjectInputStream inputStream = checkpoint.getInputStream();
        final int chkptMagic = inputStream.readInt();
        logger.info("magic read from checkpoint: " + chkptMagic);
        ConsumerClient consumer = consumerRef.get();
        if (chkptMagic == consumer.getImplementationMagic()) {
            logger.info("checkpoint fits current ConsumerClient implementation.");
        } else {
            logger.info("checkpoint does not fit current ConsumerClient implementation. Building matching client ...");
            if (consumer.isProcessing()) {
                consumer.onShutdown(SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
            }
            final ConsumerClientBuilder builder = magics.get(chkptMagic);
            final ConsumerClient newClient = builder.build();
            if (consumerRef.compareAndSet(consumer, newClient)) {
                try {
                    newClient.startConsumer();
                    logger.info(MsgFormatter.format("consumer client implementation {0} replaced by {1}", consumer.getClass().getName(), newClient.getClass().getName()));
                } catch (KafkaClientInitializationException e) {
                    logger.error(e.getLocalizedMessage(), e);
                    logger.error("root cause: " + e.getRootCause());
                    throw new KafkaOperatorResetFailedException("consumer client replacement failed", e);
                }
            } else {
                if (consumerRef.get().getImplementationMagic() != chkptMagic) {
                    logger.warn(MsgFormatter.format("consumer client replacement failed"));
                    throw new KafkaOperatorResetFailedException("consumer client replacement failed");
                }
            }
        }
        consumer = consumerRef.get();
        if (consumer.isProcessing()) {
            // it is up to the consumer client implementation to stop polling.
            consumer.onReset(checkpoint);
        }
    } catch (InterruptedException e) {
        logger.log(DEBUG_LEVEL, "RESET interrupted");
        return;
    } finally {
        // resettingLatch can be null when the reset is triggered by the failure of
        // another PE, i.e. when relaunch count == 0 in initialize(context)
        if (resettingLatch != null)
            resettingLatch.countDown();
        final long after = System.currentTimeMillis();
        final long duration = after - before;
        logger.log(DEBUG_LEVEL, MsgFormatter.format(">>> RESET took {0,number,#} ms (ckpt id/attempt={1,number,#}/{2,number,#})", duration, sequenceId, attempt));
    }
}
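The branch on chkptMagic only works because the corresponding checkpoint callback writes the client's implementation magic as the first datum. That callback is not part of this page; the following is a minimal sketch of what the writing side could look like, assuming the getImplementationMagic() contract shown above (the onCheckpoint callback name is an assumption, not confirmed by this page):

import java.io.ObjectOutputStream;
import com.ibm.streams.operator.state.Checkpoint;

// Hypothetical sketch of the checkpoint side that reset() above depends on.
public void checkpoint(Checkpoint checkpoint) throws Exception {
    final ConsumerClient consumer = consumerRef.get();
    final ObjectOutputStream out = checkpoint.getOutputStream();
    // The magic must be the FIRST datum, mirrored by inputStream.readInt() in reset().
    out.writeInt(consumer.getImplementationMagic());
    // Let the client append its implementation-specific state (assumed callback name).
    consumer.onCheckpoint(checkpoint);
}

Writing the magic first lets reset() decide whether the current client can consume the rest of the stream, or whether a matching client must be built from the magics map before any client state is read.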
Use of com.ibm.streamsx.kafka.KafkaClientInitializationException in project streamsx.kafka by IBMStreams.
The class AbstractKafkaConsumerOperator, method process().
@Override
public void process(StreamingInput<Tuple> stream, Tuple tuple) throws Exception {
    synchronized (monitor) {
        logger.info("process >>> ENTRY");
        boolean interrupted = false;
        try {
            final ConsumerClient consumer = consumerRef.get();
            logger.info("current consumer implementation: " + consumer);
            ControlPortAction actn = ControlPortAction.fromJSON(tuple.getString(0));
            final ControlPortActionType action = actn.getActionType();
            if (consumer.supports(actn)) {
                logger.info("consumer implementation supports " + action);
                consumer.onControlPortAction(actn);
            } else {
                if ((consumer instanceof DummyConsumerClient) && (action == ControlPortActionType.ADD_ASSIGNMENT || action == ControlPortActionType.ADD_SUBSCRIPTION)) {
                    logger.info("replacing ConsumerClient by a version that supports " + action);
                    // we can change the client implementation
                    if (consumer.isProcessing()) {
                        consumer.onShutdown(SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
                    }
                    final ConsumerClientBuilder builder;
                    if (action == ControlPortActionType.ADD_SUBSCRIPTION) {
                        if (crContext != null) {
                            logger.error("topic subscription via control port is not supported when the operator is used in a consistent region. Ignoring " + actn.getJson());
                            nFailedControlTuples.increment();
                            logger.info("process <<< EXIT");
                            return;
                        }
                        builder = this.groupEnabledClientBuilder;
                    } else {
                        if (this.groupIdSpecified) {
                            logger.warn(MsgFormatter.format("A group.id is specified. The ''{0}'' operator " + "will NOT participate in a consumer group because the operator assigns partitions.", getOperatorContext().getName()));
                        }
                        builder = this.staticAssignClientBuilder;
                    }
                    logger.info("Using client builder: " + builder);
                    final ConsumerClient newClient = builder.build();
                    logger.info(MsgFormatter.format("consumer client {0} created", newClient.getClass().getName()));
                    try {
                        newClient.startConsumer();
                        if (consumerRef.compareAndSet(consumer, newClient)) {
                            logger.info(MsgFormatter.format("consumer client implementation {0} replaced by {1}", consumer.getClass().getName(), newClient.getClass().getName()));
                            newClient.onControlPortAction(actn);
                        } else {
                            logger.warn(MsgFormatter.format("consumer client replacement failed"));
                            newClient.onShutdown(SHUTDOWN_TIMEOUT, SHUTDOWN_TIMEOUT_TIMEUNIT);
                            nFailedControlTuples.increment();
                        }
                    } catch (KafkaClientInitializationException e) {
                        logger.error(e.getLocalizedMessage(), e);
                        logger.error("root cause: " + e.getRootCause());
                        nFailedControlTuples.increment();
                        throw e;
                    }
                } else {
                    // unsupported action
                    logger.error("Could not process control tuple. Action " + action + " is not supported by the '" + consumer.getClass().getName() + "' ConsumerClient implementation. Tuple: '" + tuple + "'");
                    nFailedControlTuples.increment();
                }
            }
        } catch (ControlPortJsonParseException e) {
            logger.error("Could not process control tuple. Parsing JSON '" + e.getJson() + "' failed.");
            logger.error(e.getLocalizedMessage(), e);
            nFailedControlTuples.increment();
        } catch (InterruptedException e) {
            // interrupted during shutdown
            interrupted = true;
            nFailedControlTuples.increment();
        } catch (Exception e) {
            e.printStackTrace();
            logger.error("Could not process control tuple: '" + tuple + "':" + e);
            logger.error(e.getLocalizedMessage(), e);
            nFailedControlTuples.increment();
        } finally {
            final ConsumerClient consumer = consumerRef.get();
            if (!interrupted && consumer.isSubscribedOrAssigned()) {
                logger.info("sendStartPollingEvent ...");
                consumer.sendStartPollingEvent();
            }
            logger.info("process <<< EXIT");
        }
    }
}
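Both reset() and process() replace the client through compareAndSet on an AtomicReference, so a racing replacement fails cleanly instead of leaving two running clients behind. A standalone sketch of that pattern follows; all names in it are hypothetical illustrations, not streamsx.kafka API:

import java.util.concurrent.atomic.AtomicReference;

// Minimal sketch of the client-swap pattern used above.
final class ClientHolder {
    interface Client {
        void start() throws Exception;
        void shutdown();
    }

    private final AtomicReference<Client> ref = new AtomicReference<>();

    /** Tries to replace 'expected' by 'replacement'; discards the new client on a lost race. */
    boolean replace(Client expected, Client replacement) throws Exception {
        replacement.start();                          // start first, as process() does
        if (ref.compareAndSet(expected, replacement)) {
            return true;                              // we won; the caller already shut the old client down
        }
        replacement.shutdown();                       // lost the race: clean up our new client
        return false;
    }
}

The compare-and-set guarantees that only the thread still holding the current reference wins; the loser tears its freshly built client down, which matches the onShutdown/nFailedControlTuples handling in the else branch of process().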
Use of com.ibm.streamsx.kafka.KafkaClientInitializationException in project streamsx.kafka by IBMStreams.
The class AbstractKafkaConsumerOperator, method initialize().
@Override
public void initialize(OperatorContext context) throws Exception {
    synchronized (monitor) {
        // Must call super.initialize(context) to correctly setup an operator.
        super.initialize(context);
        logger.info("Operator " + context.getName() + " initializing in PE: " + context.getPE().getPEId() + " in Job: " + context.getPE().getJobId()); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
        shutdown = new AtomicBoolean(false);
        StreamSchema outputSchema = context.getStreamingOutputs().get(0).getStreamSchema();
        outputMessageAttrIdx = outputSchema.getAttributeIndex(outputMessageAttrName);
        outputKeyAttrIdx = outputSchema.getAttributeIndex(outputKeyAttrName);
        outputTopicAttrIdx = outputSchema.getAttributeIndex(outputTopicAttrName);
        outputTimetampAttrIdx = outputSchema.getAttributeIndex(outputMessageTimestampAttrName);
        outputPartitionAttrIdx = outputSchema.getAttributeIndex(outputPartitionAttrName);
        outputOffsetAttrIdx = outputSchema.getAttributeIndex(outputOffsetAttrName);
        // default to String.class for the key type when the output schema has no key attribute
        Class<?> keyClass = outputKeyAttrIdx >= 0 ? getAttributeType(context.getStreamingOutputs().get(0), outputKeyAttrName) : String.class;
        Class<?> valueClass = getAttributeType(context.getStreamingOutputs().get(0), outputMessageAttrName);
        KafkaOperatorProperties kafkaProperties = getKafkaProperties();
        if (this.startPosition == StartPosition.Time && !context.getParameterNames().contains(START_TIME_PARAM)) {
            throw new KafkaConfigurationException(Messages.getString("START_TIME_PARAM_NOT_FOUND"));
        }
        if (this.startPosition == StartPosition.Offset && !context.getParameterNames().contains(START_OFFSET_PARAM)) {
            throw new KafkaConfigurationException(Messages.getString("START_OFFSET_PARAM_NOT_FOUND"));
        }
        // set the group ID property if the groupId parameter is specified
        if (groupId != null && !groupId.isEmpty()) {
            kafkaProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        }
        final boolean hasInputPorts = context.getStreamingInputs().size() > 0;
        final String gid = kafkaProperties.getProperty(ConsumerConfig.GROUP_ID_CONFIG);
        this.groupIdSpecified = gid != null && !gid.trim().isEmpty();
        logger.log(DEBUG_LEVEL, "group-ID specified: " + this.groupIdSpecified);
        if (crContext != null) {
            commitMode = CommitMode.ConsistentRegionDrain;
        } else {
            final Set<String> parameterNames = context.getParameterNames();
            commitMode = parameterNames.contains(COMMIT_COUNT_PARAM) ? CommitMode.TupleCount : CommitMode.Time;
        }
        if (this.staticGroupMember) {
            // calculate a unique group.instance.id that is consistent across operator restarts
            final ProcessingElement pe = context.getPE();
            final int iidH = pe.getInstanceId().hashCode();
            final int opnH = context.getName().hashCode();
            final String groupInstanceId = MsgFormatter.format("i{0}-o{1}", (iidH < 0 ? "N" + (-iidH) : "P" + iidH), (opnH < 0 ? "N" + (-opnH) : "P" + opnH));
            logger.info("Generated group.instance.id: " + groupInstanceId);
            kafkaProperties.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId);
        }
        // create the builders for the consumer clients
        if (crContext == null) {
            this.groupEnabledClientBuilder = new NonCrKafkaConsumerGroupClient.Builder()
                    .setOperatorContext(context)
                    .setKafkaProperties(kafkaProperties)
                    .setKeyClass(keyClass)
                    .setValueClass(valueClass)
                    .setSingleTopic(this.topics != null && this.topics.size() == 1)
                    .setPollTimeout(this.consumerPollTimeout)
                    .setInitialStartPosition(this.startPosition)
                    .setCommitMode(commitMode)
                    .setCommitPeriod(commitPeriod)
                    .setCommitCount(commitCount);
            this.staticAssignClientBuilder = new NonCrKafkaConsumerClient.Builder()
                    .setOperatorContext(context)
                    .setKafkaProperties(kafkaProperties)
                    .setKeyClass(keyClass)
                    .setValueClass(valueClass)
                    .setPollTimeout(this.consumerPollTimeout)
                    .setInitialStartPosition(this.startPosition)
                    .setCommitMode(commitMode)
                    .setCommitPeriod(commitPeriod)
                    .setCommitCount(commitCount);
        } else {
            // consistent region
            this.groupEnabledClientBuilder = new CrKafkaConsumerGroupClient.Builder()
                    .setOperatorContext(context)
                    .setKafkaProperties(kafkaProperties)
                    .setKeyClass(keyClass)
                    .setValueClass(valueClass)
                    .setPollTimeout(this.consumerPollTimeout)
                    .setSingleTopic(this.topics != null && this.topics.size() == 1)
                    .setTriggerCount(this.triggerCount)
                    .setInitialStartPosition(this.startPosition)
                    .setInitialStartTimestamp(this.startTime);
            this.staticAssignClientBuilder = new CrKafkaStaticAssignConsumerClient.Builder()
                    .setOperatorContext(context)
                    .setKafkaProperties(kafkaProperties)
                    .setKeyClass(keyClass)
                    .setValueClass(valueClass)
                    .setPollTimeout(this.consumerPollTimeout)
                    .setTriggerCount(this.triggerCount);
        }
        magics.put(this.staticAssignClientBuilder.getImplementationMagic(), this.staticAssignClientBuilder);
        magics.put(this.groupEnabledClientBuilder.getImplementationMagic(), this.groupEnabledClientBuilder);
        final ConsumerClientBuilder builder;
        if (hasInputPorts) {
            if (crContext != null) {
                // in a consistent region, we do not use group management together with the input port
                builder = this.staticAssignClientBuilder;
            } else {
                // not in a consistent region: the right builder is selected on checkpoint reset or on first partition/topic addition
                builder = new DummyConsumerClient.Builder()
                        .setOperatorContext(context)
                        .setKafkaProperties(kafkaProperties);
                magics.put(builder.getImplementationMagic(), builder);
            }
        } else {
            boolean groupManagementEnabled;
            if (Features.ENABLE_GROUP_MANAGEMENT_NO_USER_GROUP_ID) {
                groupManagementEnabled = this.partitions == null || this.partitions.isEmpty();
            } else {
                // legacy (2.x) behavior
                groupManagementEnabled = this.groupIdSpecified && (this.partitions == null || this.partitions.isEmpty());
            }
            if (this.groupIdSpecified && !groupManagementEnabled) {
                if (this.partitions != null && !this.partitions.isEmpty()) {
                    logger.warn(MsgFormatter.format("The group.id ''{0}'' is specified. The ''{1}'' operator " + "will NOT participate in a consumer group because partitions to consume are specified.", gid, context.getName()));
                }
            }
            // When group management is disabled and no input port is configured, we must not subscribe with a pattern.
            // At this point it is already guaranteed that exactly one of the 'topic' or 'pattern' parameters is present.
            final boolean p = this.pattern != null;
            final boolean t = this.topics != null;
            assert ((p && !t) || (t && !p));
            if (!groupManagementEnabled && p) {
                final String msg = Messages.getString("PATTERN_SUBSCRIPTION_REQUIRES_GROUP_MGT", PATTERN_PARAM, context.getName(), context.getKind());
                logger.error(msg);
                throw new KafkaConfigurationException(msg);
            }
            builder = groupManagementEnabled ? this.groupEnabledClientBuilder : this.staticAssignClientBuilder;
        }
        ConsumerClient client = builder.build();
        consumerRef = new AtomicReference<>(client);
        logger.info(MsgFormatter.format("consumer client {0} created", client.getClass().getName()));
        try {
            client.startConsumer();
        } catch (KafkaClientInitializationException e) {
            e.printStackTrace();
            logger.error(e.getLocalizedMessage(), e);
            logger.error("root cause: " + e.getRootCause());
            throw e;
        }
        // input port not used, so topic or pattern must be defined
        if (!hasInputPorts) {
            if (this.topics != null) {
                final boolean registerAsInput = true;
                registerForDataGovernance(context, topics, registerAsInput);
                switch (startPosition) {
                    case Time:
                        client.subscribeToTopicsWithTimestamp(topics, partitions, startTime);
                        break;
                    case Offset:
                        client.subscribeToTopicsWithOffsets(topics.get(0), partitions, startOffsets);
                        break;
                    default:
                        client.subscribeToTopics(topics, partitions, startPosition);
                }
            } else {
                switch (startPosition) {
                    case Time:
                        client.subscribeToTopicsWithTimestamp(pattern, startTime);
                        break;
                    case Beginning:
                    case End:
                    case Default:
                        client.subscribeToTopics(pattern, startPosition);
                        break;
                    default:
                        throw new KafkaClientInitializationException("Illegal 'startPosition' value for subscription with pattern: " + startPosition);
                }
            }
        }
        if (crContext != null && context.getPE().getRelaunchCount() > 0) {
            resettingLatch = new CountDownLatch(1);
        }
        processThread = getOperatorContext().getThreadFactory().newThread(new Runnable() {
            @Override
            public void run() {
                try {
                    processThreadEndedLatch = new CountDownLatch(1);
                    // initiates start polling if assigned or subscribed by sending an event
                    produceTuples();
                } catch (Exception e) {
                    Logger.getLogger(this.getClass()).error("Operator error", e); //$NON-NLS-1$
                    // Rethrow to make the PE fail. Otherwise this thread would terminate,
                    // leaving the PE in a healthy state without actually being healthy.
                    throw new RuntimeException(e.getLocalizedMessage(), e);
                } finally {
                    if (processThreadEndedLatch != null)
                        processThreadEndedLatch.countDown();
                    logger.info("process thread (tid = " + Thread.currentThread().getId() + ") ended.");
                }
            }
        });
        processThread.setDaemon(false);
    }
}
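The staticGroupMember branch in initialize() derives a group.instance.id from values that survive an operator relaunch (the Streams instance ID and the operator name), so static group membership keeps working across restarts. The derivation can be reproduced in isolation; the helper below is illustrative only (it uses String.format where the operator uses MsgFormatter.format):

// Illustrative helper reproducing the group.instance.id derivation from initialize().
// Hash codes are encoded as "P<hash>" or "N<-hash>" so that negative values do not
// inject an extra '-' into the generated ID.
static String stableGroupInstanceId(String instanceId, String operatorName) {
    final int iidH = instanceId.hashCode();
    final int opnH = operatorName.hashCode();
    return String.format("i%s-o%s",
            iidH < 0 ? "N" + (-iidH) : "P" + iidH,
            opnH < 0 ? "N" + (-opnH) : "P" + opnH);
}

// Example: stableGroupInstanceId("StreamsInstance", "KafkaConsumerOp") returns the
// same ID on every relaunch, because both inputs are stable across restarts.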
Use of com.ibm.streamsx.kafka.KafkaClientInitializationException in project streamsx.kafka by IBMStreams.
The class AbstractKafkaConsumerClient, method startConsumer().
/**
 * Validates the setup of the consumer client by calling the {@link #validate()} method,
 * creates the Kafka consumer object, and starts the consumer and the event thread.
 * This method ensures that the event thread is running when it returns.
 * Methods that override this method must call super.startConsumer().
 * @throws InterruptedException The thread has been interrupted.
 * @throws KafkaClientInitializationException The client could not be initialized.
 */
public void startConsumer() throws InterruptedException, KafkaClientInitializationException {
    try {
        validate();
    } catch (Exception e) {
        throw new KafkaClientInitializationException(e.getLocalizedMessage(), e);
    }
    consumerInitLatch = new CountDownLatch(1);
    Thread eventThread = getOperatorContext().getThreadFactory().newThread(new Runnable() {
        @Override
        public void run() {
            try {
                maxPollRecords = getMaxPollRecordsFromProperties(kafkaProperties);
                maxPollIntervalMs = getMaxPollIntervalMsFromProperties(kafkaProperties);
                fetchMaxBytes = getFetchMaxBytesFromProperties(kafkaProperties);
                SystemProperties.resolveApplicationDir(getOperatorContext().getPE().getApplicationDirectory().getAbsolutePath());
                consumer = new KafkaConsumer<>(kafkaProperties);
                processing.set(true);
            } catch (Exception e) {
                initializationException = e;
                return;
            } finally {
                // notify that consumer is ready
                consumerInitLatch.countDown();
            }
            try {
                runEventLoop();
            } catch (InterruptedException e) {
                logger.debug("Event thread interrupted. Terminating thread.");
                return;
            } finally {
                processing.set(false);
                logger.info("event thread (tid = " + Thread.currentThread().getId() + ") ended.");
            }
        }
    });
    eventThread.setDaemon(false);
    eventThread.start();
    // wait for consumer thread to be running before returning
    consumerInitLatch.await();
    if (this.metricsFetcher == null) {
        this.metricsFetcher = new MetricsFetcher(getOperatorContext(), new MetricsProvider() {
            @Override
            public Map<MetricName, ? extends org.apache.kafka.common.Metric> getMetrics() {
                return consumer.metrics();
            }

            @Override
            public String createCustomMetricName(MetricName metricName) throws KafkaMetricException {
                return ConsumerMetricsReporter.createOperatorMetricName(metricName);
            }
        }, ConsumerMetricsReporter.getMetricsFilter(), AbstractKafkaClient.METRICS_REPORT_INTERVAL);
    }
    if (initializationException != null)
        throw new KafkaClientInitializationException(initializationException.getLocalizedMessage(), initializationException);
}
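startConsumer() shows a common thread-handoff pattern: the spawning thread blocks on a CountDownLatch until the worker thread has either initialized successfully or recorded its failure, then rethrows the captured exception on the caller's thread as a KafkaClientInitializationException. A stripped-down, standalone sketch of the pattern follows; the class and names are illustrative, not streamsx.kafka API:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

// Standalone sketch of the init-handoff pattern used in startConsumer() above.
final class WorkerStarter {
    private final CountDownLatch initLatch = new CountDownLatch(1);
    private final AtomicReference<Exception> initError = new AtomicReference<>();

    void start(Runnable initStep, Runnable loop) throws Exception {
        Thread t = new Thread(() -> {
            try {
                initStep.run();          // e.g. create the KafkaConsumer
            } catch (Exception e) {
                initError.set(e);        // capture the failure for the starter thread
                return;
            } finally {
                initLatch.countDown();   // always release the waiting starter
            }
            loop.run();                  // e.g. runEventLoop()
        });
        t.setDaemon(false);
        t.start();
        initLatch.await();               // block until init succeeded or failed
        Exception e = initError.get();
        if (e != null) {
            throw e;                     // surface the worker's failure on the caller's thread
        }
    }
}

Counting the latch down in a finally block is the load-bearing detail: it guarantees the starter is released even when initialization throws, which is why startConsumer() can safely await() and then inspect initializationException.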