Use of org.apache.kafka.common.KafkaException in project kafka by apache.
The class ConsumerCoordinator, method onJoinComplete.
@Override
protected void onJoinComplete(int generation, String memberId, String assignmentStrategy, ByteBuffer assignmentBuffer) {
    log.debug("Executing onJoinComplete with generation {} and memberId {}", generation, memberId);

    // Only the leader is responsible for monitoring for metadata changes (i.e. partition changes)
    if (!isLeader)
        assignmentSnapshot = null;

    ConsumerPartitionAssignor assignor = lookupAssignor(assignmentStrategy);
    if (assignor == null)
        throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);

    // Give the assignor a chance to update internal state based on the received assignment
    groupMetadata = new ConsumerGroupMetadata(rebalanceConfig.groupId, generation, memberId, rebalanceConfig.groupInstanceId);

    SortedSet<TopicPartition> ownedPartitions = new TreeSet<>(COMPARATOR);
    ownedPartitions.addAll(subscriptions.assignedPartitions());

    // The buffer must at least encode the 2-byte version short
    if (assignmentBuffer.remaining() < 2)
        throw new IllegalStateException("There are insufficient bytes available to read assignment from the sync-group response (" +
            "actual byte size " + assignmentBuffer.remaining() + "), this is not expected; " +
            "it is possible that the leader's assign function is buggy and did not return any assignment for this member, " +
            "or because static member is configured and the protocol is buggy hence did not get the assignment for this member");

    Assignment assignment = ConsumerProtocol.deserializeAssignment(assignmentBuffer);

    SortedSet<TopicPartition> assignedPartitions = new TreeSet<>(COMPARATOR);
    assignedPartitions.addAll(assignment.partitions());

    if (!subscriptions.checkAssignmentMatchedSubscription(assignedPartitions)) {
        final String reason = String.format("received assignment %s does not match the current subscription %s; " +
            "it is likely that the subscription has changed since we joined the group, will re-join with current subscription",
            assignment.partitions(), subscriptions.prettyString());
        requestRejoin(reason);
        return;
    }

    final AtomicReference<Exception> firstException = new AtomicReference<>(null);
    SortedSet<TopicPartition> addedPartitions = new TreeSet<>(COMPARATOR);
    addedPartitions.addAll(assignedPartitions);
    addedPartitions.removeAll(ownedPartitions);

    if (protocol == RebalanceProtocol.COOPERATIVE) {
        SortedSet<TopicPartition> revokedPartitions = new TreeSet<>(COMPARATOR);
        revokedPartitions.addAll(ownedPartitions);
        revokedPartitions.removeAll(assignedPartitions);

        log.info("Updating assignment with\n" +
            "\tAssigned partitions: {}\n" +
            "\tCurrent owned partitions: {}\n" +
            "\tAdded partitions (assigned - owned): {}\n" +
            "\tRevoked partitions (owned - assigned): {}\n",
            assignedPartitions, ownedPartitions, addedPartitions, revokedPartitions);

        if (!revokedPartitions.isEmpty()) {
            // Revoke partitions that were previously owned but are no longer assigned;
            // note that we should only change the assignment (or update the assignor's state)
            // AFTER we have triggered the revoke callback
            firstException.compareAndSet(null, invokePartitionsRevoked(revokedPartitions));

            // If any partitions were revoked, we need to re-join the group afterwards
            final String reason = String.format("need to revoke partitions %s as indicated " +
                "by the current assignment and re-join", revokedPartitions);
            requestRejoin(reason);
        }
    }

    // The leader may have assigned partitions which match our subscription pattern, but which
    // were not explicitly requested, so we update the joined subscription here.
    maybeUpdateJoinedSubscription(assignedPartitions);

    // Catch any exception here to make sure we can complete the user callback.
    firstException.compareAndSet(null, invokeOnAssignment(assignor, assignment));

    // Reschedule the auto-commit starting from now
    if (autoCommitEnabled)
        this.nextAutoCommitTimer.updateAndReset(autoCommitIntervalMs);

    subscriptions.assignFromSubscribed(assignedPartitions);

    // Add partitions that were not previously owned but are now assigned
    firstException.compareAndSet(null, invokePartitionsAssigned(addedPartitions));

    if (firstException.get() != null) {
        if (firstException.get() instanceof KafkaException) {
            throw (KafkaException) firstException.get();
        } else {
            throw new KafkaException("User rebalance callback throws an error", firstException.get());
        }
    }
}
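Because onJoinComplete records the first exception thrown from the user's rebalance callbacks and rethrows it wrapped in a KafkaException, that exception surfaces from the application's poll() call. A minimal sketch of how this looks from the caller's side, assuming an already-configured consumer; the topic name is a placeholder:

consumer.subscribe(Collections.singletonList("example-topic"), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // commit state, flush buffers, etc.; an exception thrown here is
        // captured by the coordinator via invokePartitionsRevoked(...)
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        throw new IllegalStateException("simulated callback failure");
    }
});

try {
    consumer.poll(Duration.ofMillis(100));
} catch (KafkaException e) {
    // rethrown as "User rebalance callback throws an error";
    // the original IllegalStateException is available via e.getCause()
    log.error("Rebalance callback failed", e.getCause());
}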
Use of org.apache.kafka.common.KafkaException in project kafka by apache.
The class ConsumerCoordinator, method onJoinPrepare.
@Override
protected boolean onJoinPrepare(int generation, String memberId) {
    log.debug("Executing onJoinPrepare with generation {} and memberId {}", generation, memberId);
    boolean onJoinPrepareAsyncCommitCompleted = false;

    // Async-commit offsets prior to rebalance if auto-commit is enabled
    RequestFuture<Void> future = maybeAutoCommitOffsetsAsync();

    // The commit is considered completed when:
    // 1. no commit request was sent (future is null),
    // 2. the offset commit succeeded, or
    // 3. the offset commit failed with a non-retriable exception
    if (future == null)
        onJoinPrepareAsyncCommitCompleted = true;
    else if (future.succeeded())
        onJoinPrepareAsyncCommitCompleted = true;
    else if (future.failed() && !future.isRetriable()) {
        log.error("Asynchronous auto-commit of offsets failed: {}", future.exception().getMessage());
        onJoinPrepareAsyncCommitCompleted = true;
    }

    // The generation / member-id can possibly be reset by the heartbeat thread
    // upon getting errors or heartbeat timeouts; in that case any previously
    // owned partitions would be lost, so we should trigger the callback and clean up the assignment;
    // otherwise we can proceed normally and revoke the partitions depending on the protocol,
    // and in that case we should only change the assignment AFTER the revoke callback is triggered
    // so that users can still access the previously owned partitions to commit offsets etc.
    Exception exception = null;
    final SortedSet<TopicPartition> revokedPartitions = new TreeSet<>(COMPARATOR);
    if (generation == Generation.NO_GENERATION.generationId || memberId.equals(Generation.NO_GENERATION.memberId)) {
        revokedPartitions.addAll(subscriptions.assignedPartitions());

        if (!revokedPartitions.isEmpty()) {
            log.info("Giving away all assigned partitions as lost since generation/memberID has been reset, " +
                "indicating that the consumer is in an old state or no longer part of the group");
            exception = invokePartitionsLost(revokedPartitions);

            subscriptions.assignFromSubscribed(Collections.emptySet());
        }
    } else {
        switch (protocol) {
            case EAGER:
                // Revoke all partitions
                revokedPartitions.addAll(subscriptions.assignedPartitions());
                exception = invokePartitionsRevoked(revokedPartitions);

                subscriptions.assignFromSubscribed(Collections.emptySet());
                break;

            case COOPERATIVE:
                // Only revoke those partitions that are no longer in the subscription
                Set<TopicPartition> ownedPartitions = new HashSet<>(subscriptions.assignedPartitions());
                revokedPartitions.addAll(ownedPartitions.stream()
                    .filter(tp -> !subscriptions.subscription().contains(tp.topic()))
                    .collect(Collectors.toSet()));

                if (!revokedPartitions.isEmpty()) {
                    exception = invokePartitionsRevoked(revokedPartitions);

                    ownedPartitions.removeAll(revokedPartitions);
                    subscriptions.assignFromSubscribed(ownedPartitions);
                }
                break;
        }
    }

    isLeader = false;
    subscriptions.resetGroupSubscription();

    if (exception != null) {
        throw new KafkaException("User rebalance callback throws an error", exception);
    }
    return onJoinPrepareAsyncCommitCompleted;
}
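The EAGER/COOPERATIVE branch above is driven by which assignors the consumer is configured with. As a sketch (the broker address and group id are placeholders), configuring only the CooperativeStickyAssignor opts the consumer into RebalanceProtocol.COOPERATIVE, so onJoinPrepare revokes just the partitions that dropped out of the subscription instead of the full assignment:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder address
props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");            // placeholder group id
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
// CooperativeStickyAssignor alone selects the COOPERATIVE protocol;
// a RangeAssignor-only configuration keeps the consumer on EAGER
props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
        CooperativeStickyAssignor.class.getName());
KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);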
Use of org.apache.kafka.common.KafkaException in project kafka by apache.
The class KafkaBasedLog, method stop.
public void stop() {
    log.info("Stopping KafkaBasedLog for topic " + topic);

    synchronized (this) {
        stopRequested = true;
    }
    consumer.wakeup();

    try {
        thread.join();
    } catch (InterruptedException e) {
        throw new ConnectException("Failed to stop KafkaBasedLog. Exiting without cleanly shutting " +
            "down its producer and consumer.", e);
    }

    try {
        producer.close();
    } catch (KafkaException e) {
        log.error("Failed to stop KafkaBasedLog producer", e);
    }

    try {
        consumer.close();
    } catch (KafkaException e) {
        log.error("Failed to stop KafkaBasedLog consumer", e);
    }

    // Do not close the admin client, since we don't own it
    admin = null;

    log.info("Stopped KafkaBasedLog for topic " + topic);
}
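The wakeup-then-join ordering here matters: the work thread is typically blocked inside consumer.poll(), and consumer.wakeup() is the one thread-safe way to break it out, surfacing as a WakeupException on the polling thread so that thread.join() can return promptly. A generic sketch of that pattern; the class and its handler are illustrative, not Connect's actual work thread:

class WorkThread implements Runnable {
    private final Consumer<byte[], byte[]> consumer;
    private volatile boolean stopRequested = false;

    WorkThread(Consumer<byte[], byte[]> consumer) {
        this.consumer = consumer;
    }

    @Override
    public void run() {
        try {
            while (!stopRequested) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(Long.MAX_VALUE));
                // process records ...
            }
        } catch (WakeupException e) {
            // expected once stop() has called consumer.wakeup(); fall through and exit
        }
    }

    void stop() {
        stopRequested = true;
        consumer.wakeup(); // forces a blocked poll() above to throw WakeupException
    }
}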
Use of org.apache.kafka.common.KafkaException in project kafka by apache.
The class LegacyRecord, method write.
private static void write(ByteBuffer buffer, byte magic, long timestamp, ByteBuffer key, ByteBuffer value, CompressionType compressionType, TimestampType timestampType) {
    try {
        DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
        write(out, magic, timestamp, key, value, compressionType, timestampType);
    } catch (IOException e) {
        // The target is an in-memory buffer, so an IOException here is unexpected;
        // wrap the checked exception in Kafka's unchecked KafkaException
        throw new KafkaException(e);
    }
}
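KafkaException is unchecked, which is why the write above can absorb the checked IOException: serialization into an in-memory ByteBufferOutputStream should never fail with a recoverable I/O condition, so the wrapper keeps caller signatures clean while preserving the cause. A minimal sketch of the same pattern; writeUnchecked is a hypothetical helper:

private static void writeUnchecked(ByteBuffer buffer, byte[] payload) {
    try {
        DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
        out.write(payload);
        out.flush();
    } catch (IOException e) {
        // unchecked wrapper; callers can still reach the IOException via getCause()
        throw new KafkaException(e);
    }
}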
Use of org.apache.kafka.common.KafkaException in project kafka by apache.
The class FileRecords, method writeTo.
@Override
public long writeTo(TransferableChannel destChannel, long offset, int length) throws IOException {
    long newSize = Math.min(channel.size(), end) - start;
    int oldSize = sizeInBytes();
    if (newSize < oldSize)
        throw new KafkaException(String.format(
            "Size of FileRecords %s has been truncated during write: old size %d, new size %d",
            file.getAbsolutePath(), oldSize, newSize));

    long position = start + offset;
    long count = Math.min(length, oldSize - offset);
    return destChannel.transferFrom(channel, position, count);
}
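writeTo performs a zero-copy transfer of a slice of the log file into a network channel, and the KafkaException above signals that the backing file shrank (for example through concurrent truncation) after the slice was created. A hedged sketch of a caller draining a slice in a loop; fileRecords and channel are assumed to exist, and IOException handling is elided:

int total = fileRecords.sizeInBytes();
long written = 0;
try {
    while (written < total)
        written += fileRecords.writeTo(channel, written, (int) (total - written));
} catch (KafkaException e) {
    // the segment was truncated underneath us; abort this transfer
    log.warn("Log slice truncated after transferring {} bytes", written, e);
}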