Use of com.ibm.streamsx.kafka.clients.OffsetManager in project streamsx.kafka by IBMStreams.
The class CrKafkaStaticAssignConsumerClient, method processResetToInitEvent.
/**
 * Resets the client to an initial state when no checkpoint is available.
 */
@Override
protected void processResetToInitEvent() {
    logger.log(DEBUG_LEVEL, "processResetToInitEvent() - entering");
    try {
        final OffsetManager ofsm = getDeserializedOffsetManagerCV();
        offsetManager.putOffsets(ofsm);
        logger.log(DEBUG_LEVEL, "offsetManager after applying initial state = " + offsetManager); //$NON-NLS-1$
        // refresh from the cluster as we may have written to the topics
        refreshFromCluster();
        // remove records from queue
        clearDrainBuffer();
        getMessageQueue().clear();
        this.nSubmittedRecords = 0;
    } catch (Exception e) {
        throw new RuntimeException(e.getLocalizedMessage(), e);
    }
    logger.log(DEBUG_LEVEL, "processResetToInitEvent() - exiting");
}
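The snippets on this page all rely on the same contract of the offset store: one "next offset to commit" (offset of the last submitted tuple + 1) per topic partition, merged from a saved state via putOffsets() and pruned or cleared when partitions go away. The following is only a sketch of that contract as inferred from the calls in these snippets; names such as OffsetManagerSketch and savePosition are made up for illustration and this is not the actual com.ibm.streamsx.kafka.clients.OffsetManager implementation.

// Minimal sketch of the offset-store operations the snippets rely on, inferred only
// from the calls on this page; the real OffsetManager class is richer (serialization,
// per-topic bookkeeping, etc.).
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.common.TopicPartition;

class OffsetManagerSketch {
    // next offset to commit per partition ("offset of last submitted tuple + 1")
    private final Map<TopicPartition, Long> offsets = new HashMap<>();

    // hypothetical writer side: called after a record has been submitted as a tuple
    synchronized void savePosition(String topic, int partition, long nextOffset) {
        offsets.put(new TopicPartition(topic, partition), nextOffset);
    }

    // returns -1 when no offset is stored for the partition
    synchronized long getOffset(String topic, int partition) {
        Long o = offsets.get(new TopicPartition(topic, partition));
        return o == null ? -1L : o;
    }

    synchronized Set<TopicPartition> getMappedTopicPartitions() {
        return new HashSet<>(offsets.keySet());
    }

    // merge another state, e.g. restored from a checkpoint or control variable
    synchronized void putOffsets(OffsetManagerSketch other) {
        offsets.putAll(other.offsets);
    }

    synchronized void remove(String topic, int partition) {
        offsets.remove(new TopicPartition(topic, partition));
    }

    synchronized void clear() {
        offsets.clear();
    }
}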
Use of com.ibm.streamsx.kafka.clients.OffsetManager in project streamsx.kafka by IBMStreams.
The class NonCrKafkaConsumerGroupClient, method processControlPortActionEvent.
/**
 * Changes the subscription of the consumer via control port.
 *
 * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processControlPortActionEvent(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
 */
@Override
protected void processControlPortActionEvent(ControlPortAction action) {
    try {
        final ControlPortActionType actionType = action.getActionType();
        if (actionType == ControlPortActionType.ADD_SUBSCRIPTION || actionType == ControlPortActionType.REMOVE_SUBSCRIPTION) {
            trace.info("action: " + action);
        } else if (trace.isDebugEnabled()) {
            trace.debug("action: " + action);
        }
        final Set<String> oldSubscription = getConsumer().subscription();
        final Set<String> newSubscription = new HashSet<>(oldSubscription);
        trace.info("current topic subscription: " + newSubscription);
        boolean subscriptionChanged = false;
        switch (actionType) {
        case ADD_SUBSCRIPTION:
            action.getTopics().forEach(tpc -> {
                newSubscription.add(tpc);
            });
            break;
        case REMOVE_SUBSCRIPTION:
            action.getTopics().forEach(tpc -> {
                newSubscription.remove(tpc);
            });
            break;
        default:
            throw new UnsupportedControlPortActionException("processControlPortActionEvent(): action: " + actionType + " not supported by this client: " + getThisClassName());
        }
        subscriptionChanged = !newSubscription.equals(oldSubscription);
        if (!subscriptionChanged) {
            trace.info("Subscription is unchanged: " + newSubscription);
        } else {
            if (newSubscription.size() > 0) {
                trace.info("Subscription changed. New subscription: " + newSubscription);
            } else {
                // With Kafka client 2.3, no partition rebalance happened when we only unsubscribed, so that the
                // onPartitionsRevoked callback was not called, where we usually commit offsets before we give
                // away partitions.
                // With Kafka client 2.5.1, things are different. The onPartitionsRevoked callback is called,
                // so that we could commit the offsets there and remove the following code block, but it is also safe
                // to preserve the old logic and commit now.
                trace.info("Unsubscribing all topics. Going to commit offsets.");
                // remove the content of the queue. It contains uncommitted messages.
                getMessageQueue().clear();
                OffsetManager offsetManager = getOffsetManager();
                try {
                    awaitMessageQueueProcessed();
                    // the post-condition is that all messages from the queue have been submitted as
                    // tuples and their offsets + 1 are stored in the OffsetManager.
                    final boolean commitSync = true;
                    final boolean commitPartitionWise = false;
                    CommitInfo offsets = new CommitInfo(commitSync, commitPartitionWise);
                    synchronized (offsetManager) {
                        Set<TopicPartition> partitionsInOffsetManager = offsetManager.getMappedTopicPartitions();
                        Set<TopicPartition> currentAssignment = getAssignedPartitions();
                        for (TopicPartition tp : partitionsInOffsetManager) {
                            if (currentAssignment.contains(tp)) {
                                offsets.put(tp, offsetManager.getOffset(tp.topic(), tp.partition()));
                            }
                        }
                    }
                    if (!offsets.isEmpty()) {
                        commitOffsets(offsets);
                    }
                    // reset the counter for periodic commit
                    resetCommitPeriod(System.currentTimeMillis());
                } catch (InterruptedException | RuntimeException e) {
                    // Ignore InterruptedException; a RuntimeException from commitOffsets has already been traced.
                }
                // avoid committing offsets in onPartitionsRevoked (if called)
                offsetManager.clear();
            }
            subscribe(newSubscription, this);
            // getChkptContext().getKind() is not reported properly. Streams Build 20180710104900 (4.3.0.0) never returns OPERATOR_DRIVEN
            if (getCheckpointKind() == Kind.OPERATOR_DRIVEN) {
                trace.info("initiating checkpointing with current topic subscription");
                // createCheckpoint() throws IOException
                boolean result = getChkptContext().createCheckpoint();
                trace.info("createCheckpoint() result: " + result);
            }
        }
    } catch (Exception e) {
        trace.error(e.getLocalizedMessage(), e);
        throw new KafkaOperatorRuntimeException(e.getMessage(), e);
    }
}
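For readers who want the equivalent in the plain Kafka consumer API: CommitInfo together with commitOffsets(...) essentially wraps a synchronous commit of explicit per-partition offsets. The sketch below shows only that underlying call and is not the operator's own code; the operator additionally routes the commit through its event queue and adds error handling.

// Sketch only: committing explicit per-partition offsets synchronously with the plain
// Kafka consumer API. The map values are "offset of last processed record + 1".
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

final class CommitSketch {
    static void commitCollectedOffsets(KafkaConsumer<?, ?> consumer,
                                       Map<TopicPartition, Long> nextOffsets) {
        Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
        nextOffsets.forEach((tp, off) -> toCommit.put(tp, new OffsetAndMetadata(off)));
        if (!toCommit.isEmpty()) {
            consumer.commitSync(toCommit); // synchronous, corresponds to commitSync == true above
        }
    }
}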
Use of com.ibm.streamsx.kafka.clients.OffsetManager in project streamsx.kafka by IBMStreams.
The class NonCrKafkaConsumerGroupClient, method onPartitionsRevoked.
/**
 * Callback method of the ConsumerRebalanceListener.
 * For incremental cooperative rebalancing we must assume that only a part of the assigned partitions may be revoked,
 * and that the revocation is not necessarily followed by an {@link #onPartitionsAssigned(Collection)}.
 * The eager rebalancing protocol always revokes the complete partition assignment, followed by {@link #onPartitionsAssigned(Collection)}.
 * @param partitions The revoked partitions.
 * @see org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsRevoked(java.util.Collection)
 */
@Override
public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
    // Since Kafka 2.4 we MUST NOT assume any more that the complete assignment is revoked.
    // KIP-429 (incremental cooperative rebalancing) allows that a part of the assignment
    // is being revoked.
    Set<TopicPartition> assignment = getConsumer().assignment();
    trace.info("onPartitionsRevoked: current assignment: " + assignment);
    trace.info("onPartitionsRevoked: revoked: " + partitions);
    Set<TopicPartition> remainingAssignment = removeAssignedPartitions(partitions);
    trace.info("onPartitionsRevoked: remaining partition assignment: " + remainingAssignment);
    final boolean allRevoked = remainingAssignment.isEmpty();
    // Remove the revoked records from the queue. They will be fetched again after the rebalance by this or a different consumer.
    if (allRevoked) {
        getMessageQueue().clear();
    } else {
        getMessageQueue().removeIf(rec -> partitions.contains(new TopicPartition(rec.topic(), rec.partition())));
    }
    OffsetManager offsetManager = getOffsetManager();
    try {
        if (allRevoked) {
            // TODO: What is the benefit of waiting here?
            // We must be prepared anyway for the case that a consumer record belonging to a removed
            // partition is in flight for tuple submission, which can cause a stale entry in the offset manager.
            awaitMessageQueueProcessed();
            // the post-condition is that all messages from the queue have been submitted as
            // tuples and their offsets + 1 are stored in the OffsetManager.
        }
        final boolean commitSync = true;
        final boolean commitPartitionWise = false;
        CommitInfo offsets = new CommitInfo(commitSync, commitPartitionWise);
        synchronized (offsetManager) {
            Set<TopicPartition> partitionsInOffsetManager = offsetManager.getMappedTopicPartitions();
            for (TopicPartition tp : partitions) {
                if (partitionsInOffsetManager.contains(tp)) {
                    offsets.put(tp, offsetManager.getOffset(tp.topic(), tp.partition()));
                }
            }
        }
        if (trace.isEnabledFor(DEBUG_LEVEL)) {
            trace.log(DEBUG_LEVEL, "onPartitionsRevoked: committing offsets: " + offsets);
        }
        if (!offsets.isEmpty()) {
            commitOffsets(offsets);
        }
        // reset the counter for periodic commit only when all partitions have been committed
        if (allRevoked) {
            resetCommitPeriod(System.currentTimeMillis());
        }
    } catch (InterruptedException | RuntimeException e) {
        // Ignore InterruptedException; a RuntimeException from commitOffsets has already been traced.
    } finally {
        synchronized (offsetManager) {
            if (allRevoked) {
                offsetManager.clear();
            } else {
                for (TopicPartition tp : partitions) {
                    offsetManager.remove(tp.topic(), tp.partition());
                }
            }
        }
    }
}
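The distinction between eager and incremental cooperative rebalancing that the Javadoc mentions is controlled by the consumer's partition.assignment.strategy configuration. A minimal configuration sketch follows; the property names and the CooperativeStickyAssignor class are standard Kafka consumer API, but the helper itself is not taken from the operator's code.

// Sketch: selecting the cooperative-sticky assignor, which enables the incremental
// (KIP-429) rebalance behaviour where onPartitionsRevoked may revoke only a subset
// of the assignment. The classic default, the eager RangeAssignor, always revokes
// the full assignment.
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;

final class RebalanceConfigSketch {
    static Properties cooperativeConsumerProps(String bootstrapServers, String groupId) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                  CooperativeStickyAssignor.class.getName());
        return props;
    }
}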
Use of com.ibm.streamsx.kafka.clients.OffsetManager in project streamsx.kafka by IBMStreams.
The class NonCrKafkaConsumerClient, method processControlPortActionEvent.
/**
 * @see com.ibm.streamsx.kafka.clients.consumer.AbstractKafkaConsumerClient#processControlPortActionEvent(com.ibm.streamsx.kafka.clients.consumer.ControlPortAction)
 */
@Override
protected void processControlPortActionEvent(ControlPortAction action) {
    try {
        final ControlPortActionType actionType = action.getActionType();
        if (actionType == ControlPortActionType.ADD_ASSIGNMENT || actionType == ControlPortActionType.REMOVE_ASSIGNMENT) {
            trace.info("action: " + action);
        } else if (trace.isDebugEnabled()) {
            trace.debug("action: " + action);
        }
        // create a map of current topic partitions and their fetch offsets for the next record
        Map<TopicPartition, Long /* offset */> currentTopicPartitionOffsets = new HashMap<TopicPartition, Long>();
        Set<TopicPartition> topicPartitions = getConsumer().assignment();
        topicPartitions.forEach(tp -> currentTopicPartitionOffsets.put(tp, getConsumer().position(tp)));
        boolean doNewAssign = false;
        switch (actionType) {
        case ADD_ASSIGNMENT:
            action.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
                // offset can be -2, -1, or a valid offset o >= 0
                // -2 means 'seek to beginning', -1 means 'seek to end'
                currentTopicPartitionOffsets.put(tp, offset);
            });
            doNewAssign = currentTopicPartitionOffsets.size() > 0;
            if (!doNewAssign) {
                trace.info("topic partition assignment unchanged: " + currentTopicPartitionOffsets);
            } else {
                assignToPartitionsWithOffsets(currentTopicPartitionOffsets);
                trace.info("assigned partitions after ADD: " + currentTopicPartitionOffsets);
                // No need to update the offset manager here, for example by adding topics.
                // Missing topics in the offset manager are auto-created.
                CommitInfo commits = new CommitInfo(true, false);
                // Immediately commit the fetch offsets of _only_the_added_ topic partitions
                action.getTopicPartitionOffsetMap().forEach((tp, offset) -> {
                    // do not put 'offset' into the commits; 'offset' can be -1 or -2 for 'end' or 'begin'
                    commits.put(tp, getConsumer().position(tp));
                });
                commitOffsets(commits);
                trace.info("committed offsets of the added topic partitions: " + commits);
            }
            break;
        case REMOVE_ASSIGNMENT:
            // x 1. remove messages of the removed topic partitions from the queue - they are all uncommitted
            // x 2. wait until the queue has been processed - awaitMessageQueueProcessed();
            // x 3. commit the offsets of the removed topic partitions
            // x 4. remove the unassigned topic partitions from the offsetManager (or simply clear?)
            // x 5. update the partition assignment in the consumer
            // remove messages of removed topic partitions from the message queue
            getMessageQueue().removeIf(record -> belongsToPartition(record, action.getTopicPartitionOffsetMap().keySet()));
            awaitMessageQueueProcessed();
            // the post-condition is that all messages from the queue have been submitted as
            // tuples and their offsets + 1 are stored in the OffsetManager.
            // Now the offset manager can be cleaned without the chance that the removed partition(s) re-appear after tuple submission.
            // Remove the removed partitions from the offset manager; we cannot commit offsets for partitions that are no longer assigned.
            final boolean commitSync = true;
            final boolean commitPartitionWise = false;
            CommitInfo commitOffsets = new CommitInfo(commitSync, commitPartitionWise);
            OffsetManager offsetManager = getOffsetManager();
            synchronized (offsetManager) {
                for (TopicPartition tp : action.getTopicPartitionOffsetMap().keySet()) {
                    // make sure that we commit only partitions that are assigned
                    if (currentTopicPartitionOffsets.containsKey(tp)) {
                        doNewAssign = true;
                        long offset = offsetManager.getOffset(tp.topic(), tp.partition());
                        // offset is -1 if there is no mapping from topic partition to offset
                        if (offset >= 0)
                            commitOffsets.put(tp, offset);
                        currentTopicPartitionOffsets.remove(tp);
                    }
                    offsetManager.remove(tp.topic(), tp.partition());
                }
            }
            if (!commitOffsets.isEmpty()) {
                commitOffsets(commitOffsets);
            }
            // we can end up here with an empty map after removal of assignments.
            if (doNewAssign) {
                assignToPartitionsWithOffsets(currentTopicPartitionOffsets);
                trace.info("assigned partitions after REMOVE: " + currentTopicPartitionOffsets);
            } else {
                trace.info("topic partition assignment unchanged: " + currentTopicPartitionOffsets);
            }
            break;
        default:
            throw new UnsupportedControlPortActionException("processControlPortActionEvent(): action: " + actionType + " not supported by this client: " + getThisClassName());
        }
        // getChkptContext().getKind() is not reported properly. Streams Build 20180710104900 (4.3.0.0) never returns OPERATOR_DRIVEN
        if (doNewAssign && getCheckpointKind() == Kind.OPERATOR_DRIVEN) {
            trace.info("initiating checkpointing with current partition assignment");
            // createCheckpoint() throws IOException
            boolean result = getChkptContext().createCheckpoint();
            trace.info("createCheckpoint() result: " + result);
        }
    } catch (Exception e) {
        trace.error(e.getLocalizedMessage(), e);
        throw new KafkaOperatorRuntimeException(e.getMessage(), e);
    }
}
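The -2/-1 convention noted in the ADD_ASSIGNMENT comments (seek to beginning / seek to end) maps onto the plain consumer API roughly as sketched below. This is only an illustration of the idea using standard KafkaConsumer calls; it is not the operator's assignToPartitionsWithOffsets() implementation.

// Sketch: manual assignment with the special start positions used above.
// -2 = seek to beginning, -1 = seek to end, >= 0 = seek to that offset.
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

final class AssignSketch {
    static void assignWithOffsets(KafkaConsumer<?, ?> consumer, Map<TopicPartition, Long> offsets) {
        consumer.assign(offsets.keySet());
        offsets.forEach((tp, offset) -> {
            if (offset == -2L) {
                consumer.seekToBeginning(Collections.singleton(tp));
            } else if (offset == -1L) {
                consumer.seekToEnd(Collections.singleton(tp));
            } else if (offset >= 0L) {
                consumer.seek(tp, offset);
            }
            // other values are left at the committed position or the auto.offset.reset default
        });
    }
}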
Use of com.ibm.streamsx.kafka.clients.OffsetManager in project streamsx.kafka by IBMStreams.
The class NonCrKafkaConsumerGroupClient, method onPartitionsLost.
/**
 * @see org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsLost(java.util.Collection)
 */
@Override
public void onPartitionsLost(Collection<TopicPartition> partitions) {
    trace.info("onPartitionsLost: lost: " + partitions);
    Set<TopicPartition> remainingAssignment = removeAssignedPartitions(partitions);
    trace.info("onPartitionsLost: remaining partition assignment: " + remainingAssignment);
    final boolean allLost = remainingAssignment.isEmpty();
    if (allLost) {
        getMessageQueue().clear();
    } else {
        getMessageQueue().removeIf(rec -> partitions.contains(new TopicPartition(rec.topic(), rec.partition())));
    }
    try {
        if (allLost) {
            // TODO: What is the benefit of waiting here?
            // We must be prepared anyway for the case that a consumer record belonging to a removed
            // partition is in flight for tuple submission, which can cause a stale entry in the offset manager.
            awaitMessageQueueProcessed();
            // the post-condition is that all messages from the queue have been submitted as
            // tuples and their offsets + 1 are stored in the OffsetManager.
        }
    } catch (InterruptedException e) {
        // Ignore InterruptedException.
    } finally {
        // clean up the offset manager for the lost partitions
        OffsetManager offsetManager = getOffsetManager();
        synchronized (offsetManager) {
            if (allLost) {
                offsetManager.clear();
            } else {
                for (TopicPartition tp : partitions) {
                    offsetManager.remove(tp.topic(), tp.partition());
                }
            }
        }
    }
}
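Taken together, onPartitionsRevoked and onPartitionsLost split the work of a ConsumerRebalanceListener: offsets are committed only for cleanly revoked partitions, while lost partitions are merely cleaned up locally, since a commit for them would typically fail after ownership is gone. The stand-alone sketch below condenses that pattern using the standard Kafka listener interface; it is an illustration under those assumptions, not the operator's own listener class.

// Sketch of the rebalance-listener pattern the two callbacks above implement:
// commit on clean revocation, clean up only on loss.
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

final class RebalanceListenerSketch implements ConsumerRebalanceListener {
    private final KafkaConsumer<?, ?> consumer;
    // offsets processed by the application but not yet committed
    private final Map<TopicPartition, OffsetAndMetadata> pending = new HashMap<>();

    RebalanceListenerSketch(KafkaConsumer<?, ?> consumer) {
        this.consumer = consumer;
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // partitions are handed over cleanly: commit what we have for them, then forget them
        Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
        for (TopicPartition tp : partitions) {
            OffsetAndMetadata om = pending.remove(tp);
            if (om != null) toCommit.put(tp, om);
        }
        if (!toCommit.isEmpty()) consumer.commitSync(toCommit);
    }

    @Override
    public void onPartitionsLost(Collection<TopicPartition> partitions) {
        // ownership was lost without a clean hand-over; a commit would typically fail,
        // so only discard the local state for these partitions
        partitions.forEach(pending::remove);
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // nothing to restore here; fetch positions come from the committed offsets
    }
}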