Use of org.apache.kafka.common.errors.WakeupException in project apache-kafka-on-k8s by banzaicloud.
From the class AbstractCoordinatorTest, method testWakeupAfterSyncGroupReceivedExternalCompletion:
@Test
public void testWakeupAfterSyncGroupReceivedExternalCompletion() throws Exception {
    setupCoordinator(RETRY_BACKOFF_MS);
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE));
    mockClient.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            boolean isSyncGroupRequest = body instanceof SyncGroupRequest;
            if (isSyncGroupRequest)
                // wakeup after the request returns
                consumerClient.wakeup();
            return isSyncGroupRequest;
        }
    }, syncGroupResponse(Errors.NONE));
    AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();

    try {
        coordinator.ensureActiveGroup();
        fail("Should have woken up from ensureActiveGroup()");
    } catch (WakeupException e) {
        // expected
    }

    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertFalse(heartbeatReceived.get());

    coordinator.ensureActiveGroup();

    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);
    awaitFirstHeartbeat(heartbeatReceived);
}
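The interaction this test exercises can be sketched outside the test harness: wakeup() sets a flag on the consumer, the next blocking call surfaces it as a WakeupException, and a simple retry completes normally because the flag has already been consumed. A minimal sketch, assuming a local broker; the bootstrap address, group id, and topic name are placeholders:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class WakeupRetrySketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // placeholder broker address
        props.put("group.id", "sketch-group");              // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("sketch-topic"));  // placeholder topic

            // wakeup() may be called from any thread; here it is armed before poll()
            consumer.wakeup();
            try {
                consumer.poll(Duration.ofSeconds(1));
            } catch (WakeupException e) {
                // expected: the wakeup flag is consumed here, mirroring the first
                // ensureActiveGroup() call in the test above
            }

            // the retry proceeds normally, mirroring the second ensureActiveGroup() call
            consumer.poll(Duration.ofSeconds(1));
        }
    }
}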
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class MirrorSourceTask, method poll:
@Override
public List<SourceRecord> poll() {
    if (!consumerAccess.tryAcquire()) {
        return null;
    }
    if (stopping) {
        return null;
    }
    try {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(pollTimeout);
        List<SourceRecord> sourceRecords = new ArrayList<>(records.count());
        for (ConsumerRecord<byte[], byte[]> record : records) {
            SourceRecord converted = convertRecord(record);
            sourceRecords.add(converted);
            TopicPartition topicPartition = new TopicPartition(converted.topic(), converted.kafkaPartition());
            metrics.recordAge(topicPartition, System.currentTimeMillis() - record.timestamp());
            metrics.recordBytes(topicPartition, byteSize(record.value()));
        }
        if (sourceRecords.isEmpty()) {
            // WorkerSourceTasks expects non-zero batch size
            return null;
        } else {
            log.trace("Polled {} records from {}.", sourceRecords.size(), records.partitions());
            return sourceRecords;
        }
    } catch (WakeupException e) {
        return null;
    } catch (KafkaException e) {
        log.warn("Failure during poll.", e);
        return null;
    } catch (Throwable e) {
        log.error("Failure during poll.", e);
        // allow Connect to deal with the exception
        throw e;
    } finally {
        consumerAccess.release();
    }
}
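The quiet WakeupException branch above is deliberate: the task's shutdown path wakes the consumer so that a pending poll() unblocks and the task can exit without an error. A minimal sketch of that pattern under assumed names (StoppableFetcher and its fields are hypothetical, not MirrorMaker 2 code):

import java.time.Duration;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class StoppableFetcher {
    private final KafkaConsumer<byte[], byte[]> consumer;
    private final AtomicBoolean stopping = new AtomicBoolean(false);

    public StoppableFetcher(KafkaConsumer<byte[], byte[]> consumer) {
        this.consumer = consumer;
    }

    // Called from the task thread, analogous to poll() above.
    public ConsumerRecords<byte[], byte[]> fetch() {
        if (stopping.get())
            return ConsumerRecords.empty();
        try {
            return consumer.poll(Duration.ofSeconds(1));
        } catch (WakeupException e) {
            // stop() woke the consumer; report an empty batch and let the caller exit
            return ConsumerRecords.empty();
        }
    }

    // Called from another thread, e.g. when the framework stops the task.
    public void stop() {
        stopping.set(true);
        consumer.wakeup();
    }
}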
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class ConsumerCoordinator, method invokePartitionsAssigned:
private Exception invokePartitionsAssigned(final SortedSet<TopicPartition> assignedPartitions) {
    log.info("Adding newly assigned partitions: {}", Utils.join(assignedPartitions, ", "));
    ConsumerRebalanceListener listener = subscriptions.rebalanceListener();
    try {
        final long startMs = time.milliseconds();
        listener.onPartitionsAssigned(assignedPartitions);
        sensors.assignCallbackSensor.record(time.milliseconds() - startMs);
    } catch (WakeupException | InterruptException e) {
        throw e;
    } catch (Exception e) {
        log.error("User provided listener {} failed on invocation of onPartitionsAssigned for partitions {}",
            listener.getClass().getName(), assignedPartitions, e);
        return e;
    }
    return null;
}
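An illustrative listener (hypothetical, not Kafka code) makes the distinction above concrete: a WakeupException or InterruptException thrown from the callback is re-raised to the caller, while any other exception is only logged and returned for deferred handling:

import java.util.Collection;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;

public class LoggingRebalanceListener implements ConsumerRebalanceListener {
    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        System.out.println("Assigned: " + partitions);
        // throw new WakeupException();        -> would propagate out of the coordinator
        // throw new IllegalStateException();  -> would be caught, logged, and deferred
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        System.out.println("Revoked: " + partitions);
    }
}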
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class MockConsumer, method poll:
@Override
public synchronized ConsumerRecords<K, V> poll(final Duration timeout) {
    ensureNotClosed();
    lastPollTimeout = timeout;

    // Synchronize around the entire execution so new tasks to be triggered on
    // subsequent poll calls can be added in the callback
    synchronized (pollTasks) {
        Runnable task = pollTasks.poll();
        if (task != null)
            task.run();
    }

    if (wakeup.get()) {
        wakeup.set(false);
        throw new WakeupException();
    }

    if (pollException != null) {
        RuntimeException exception = this.pollException;
        this.pollException = null;
        throw exception;
    }

    // Handle seeks that need to wait for a poll() call to be processed
    for (TopicPartition tp : subscriptions.assignedPartitions())
        if (!subscriptions.hasValidPosition(tp))
            updateFetchPosition(tp);

    // update the consumed offset
    final Map<TopicPartition, List<ConsumerRecord<K, V>>> results = new HashMap<>();
    final List<TopicPartition> toClear = new ArrayList<>();

    for (Map.Entry<TopicPartition, List<ConsumerRecord<K, V>>> entry : this.records.entrySet()) {
        if (!subscriptions.isPaused(entry.getKey())) {
            final List<ConsumerRecord<K, V>> recs = entry.getValue();
            for (final ConsumerRecord<K, V> rec : recs) {
                long position = subscriptions.position(entry.getKey()).offset;
                if (beginningOffsets.get(entry.getKey()) != null && beginningOffsets.get(entry.getKey()) > position) {
                    throw new OffsetOutOfRangeException(Collections.singletonMap(entry.getKey(), position));
                }
                if (assignment().contains(entry.getKey()) && rec.offset() >= position) {
                    results.computeIfAbsent(entry.getKey(), partition -> new ArrayList<>()).add(rec);
                    Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(Optional.empty(), rec.leaderEpoch());
                    SubscriptionState.FetchPosition newPosition =
                        new SubscriptionState.FetchPosition(rec.offset() + 1, rec.leaderEpoch(), leaderAndEpoch);
                    subscriptions.position(entry.getKey(), newPosition);
                }
            }
            toClear.add(entry.getKey());
        }
    }

    toClear.forEach(p -> this.records.remove(p));
    return new ConsumerRecords<>(results);
}
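A short usage sketch of the wakeup branch above (the topic name is made up): calling wakeup() sets the flag, the next poll() throws WakeupException and clears it, and the following poll() returns normally:

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

public class MockConsumerWakeupSketch {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        TopicPartition tp = new TopicPartition("sketch-topic", 0);
        consumer.assign(Collections.singleton(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        consumer.wakeup();
        try {
            consumer.poll(Duration.ZERO);
        } catch (WakeupException expected) {
            // the flag was cleared by wakeup.set(false) above
        }

        // no pending wakeup, no poll exception: this call returns (empty) records
        consumer.poll(Duration.ZERO);
        consumer.close();
    }
}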
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class ConsumerCoordinatorTest, method testWakeupFromAssignmentCallback:
@Test
public void testWakeupFromAssignmentCallback() {
    final String topic = "topic1";
    TopicPartition partition = new TopicPartition(topic, 0);
    final String consumerId = "follower";
    Set<String> topics = Collections.singleton(topic);
    MockRebalanceListener rebalanceListener = new MockRebalanceListener() {
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            boolean raiseWakeup = this.assignedCount == 0;
            super.onPartitionsAssigned(partitions);
            if (raiseWakeup)
                throw new WakeupException();
        }
    };
    subscriptions.subscribe(topics, rebalanceListener);

    // we only have metadata for one topic initially
    client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap(topic, 1)));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    // prepare initial rebalance
    partitionAssignor.prepare(singletonMap(consumerId, Collections.singletonList(partition)));
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(Collections.singletonList(partition), Errors.NONE));

    // The first call to poll should raise the exception from the rebalance listener
    try {
        coordinator.poll(time.timer(Long.MAX_VALUE));
        fail("Expected exception thrown from assignment callback");
    } catch (WakeupException e) {
        // expected
    }

    // The second call should retry the assignment callback and succeed
    coordinator.poll(time.timer(Long.MAX_VALUE));

    assertFalse(coordinator.rejoinNeededOrPending());
    assertEquals(0, rebalanceListener.revokedCount);
    assertEquals(2, rebalanceListener.assignedCount);
}