Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class AbstractTaskTest, method shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException:
@Test(expected = WakeupException.class)
public void shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException() throws Exception {
    final Consumer consumer = mockConsumer(new WakeupException());
    final AbstractTask task = createTask(consumer);
    task.initializeOffsetLimits();
}
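The mockConsumer and createTask helpers referenced above are test-local and not shown. A minimal sketch of how a mockConsumer(RuntimeException) helper could be written, assuming it builds on MockConsumer and fails the committed-offset lookup that initializeOffsetLimits performs (this is an illustration, not necessarily the test's actual helper):

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

// Hypothetical helper: a MockConsumer whose committed-offset lookup throws the supplied
// exception, so initializeOffsetLimits() sees a WakeupException when it asks for offsets.
private Consumer<byte[], byte[]> mockConsumer(final RuntimeException toThrow) {
    return new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}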
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class ConsumerCoordinator, method onJoinPrepare:
@Override
protected void onJoinPrepare(int generation, String memberId) {
    // commit offsets prior to rebalance if auto-commit enabled
    maybeAutoCommitOffsetsSync(rebalanceTimeoutMs);

    // execute the user's callback before rebalance
    ConsumerRebalanceListener listener = subscriptions.listener();
    log.info("Revoking previously assigned partitions {} for group {}", subscriptions.assignedPartitions(), groupId);
    try {
        Set<TopicPartition> revoked = new HashSet<>(subscriptions.assignedPartitions());
        listener.onPartitionsRevoked(revoked);
    } catch (WakeupException | InterruptException e) {
        throw e;
    } catch (Exception e) {
        log.error("User provided listener {} for group {} failed on partition revocation", listener.getClass().getName(), groupId, e);
    }

    isLeader = false;
    subscriptions.resetGroupSubscription();
}
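The WakeupException | InterruptException branch rethrows these exceptions instead of merely logging them, so a consumer.wakeup() call (or thread interrupt) issued while the user callback is running still aborts the blocking operation that triggered the rebalance. A minimal sketch of the shutdown pattern that depends on this behaviour, assuming an illustrative topic name and a placeholder process(...) method:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");    // illustrative values
props.put("group.id", "example-group");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Collections.singletonList("my-topic"));   // "my-topic" is illustrative

try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records)
            process(record);                          // process(...) is a placeholder
    }
} catch (WakeupException e) {
    // expected: another thread called consumer.wakeup() to request shutdown
} finally {
    consumer.close();
}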
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class MockConsumer, method poll:
@Override
public ConsumerRecords<K, V> poll(long timeout) {
    ensureNotClosed();

    // Synchronize around the entire execution so new tasks to be triggered on subsequent
    // poll calls can be added in the callback
    synchronized (pollTasks) {
        Runnable task = pollTasks.poll();
        if (task != null)
            task.run();
    }

    if (wakeup.get()) {
        wakeup.set(false);
        throw new WakeupException();
    }

    if (exception != null) {
        RuntimeException exception = this.exception;
        this.exception = null;
        throw exception;
    }

    // Handle seeks that need to wait for a poll() call to be processed
    for (TopicPartition tp : subscriptions.missingFetchPositions())
        updateFetchPosition(tp);

    // update the consumed offset
    final Map<TopicPartition, List<ConsumerRecord<K, V>>> results = new HashMap<>();
    for (final TopicPartition topicPartition : records.keySet()) {
        results.put(topicPartition, new ArrayList<ConsumerRecord<K, V>>());
    }
    for (Map.Entry<TopicPartition, List<ConsumerRecord<K, V>>> entry : this.records.entrySet()) {
        if (!subscriptions.isPaused(entry.getKey())) {
            final List<ConsumerRecord<K, V>> recs = entry.getValue();
            for (final ConsumerRecord<K, V> rec : recs) {
                if (assignment().contains(entry.getKey()) && rec.offset() >= subscriptions.position(entry.getKey())) {
                    results.get(entry.getKey()).add(rec);
                    subscriptions.position(entry.getKey(), rec.offset() + 1);
                }
            }
        }
    }
    this.records.clear();
    return new ConsumerRecords<>(results);
}
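The wakeup branch above is what makes MockConsumer convenient for testing WakeupException handling: the flag set by wakeup() is consumed by exactly one poll(). A small test-style sketch, assuming a single illustrative partition of "test-topic":

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

TopicPartition tp = new TopicPartition("test-topic", 0);
MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
consumer.assign(Collections.singletonList(tp));
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

consumer.wakeup();
try {
    consumer.poll(0);
    fail("expected WakeupException");
} catch (WakeupException e) {
    // expected: the wakeup flag was set before the poll
}

// the flag was cleared inside poll(), so a second poll() completes normally
assertTrue(consumer.poll(0).isEmpty());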
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class ConsumerCoordinator, method onJoinComplete:
@Override
protected void onJoinComplete(int generation, String memberId, String assignmentStrategy, ByteBuffer assignmentBuffer) {
    // only the leader is responsible for monitoring for metadata changes (i.e. partition changes)
    if (!isLeader)
        assignmentSnapshot = null;

    PartitionAssignor assignor = lookupAssignor(assignmentStrategy);
    if (assignor == null)
        throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);

    Assignment assignment = ConsumerProtocol.deserializeAssignment(assignmentBuffer);

    // set the flag to refresh last committed offsets
    subscriptions.needRefreshCommits();

    // update partition assignment
    subscriptions.assignFromSubscribed(assignment.partitions());

    // check if the assignment contains some topics that were not in the original
    // subscription, if yes we will obey what leader has decided and add these topics
    // into the subscriptions as long as they still match the subscribed pattern
    //
    // TODO this part of the logic should be removed once we allow regex on leader assign
    Set<String> addedTopics = new HashSet<>();
    for (TopicPartition tp : subscriptions.assignedPartitions()) {
        if (!joinedSubscription.contains(tp.topic()))
            addedTopics.add(tp.topic());
    }

    if (!addedTopics.isEmpty()) {
        Set<String> newSubscription = new HashSet<>(subscriptions.subscription());
        Set<String> newJoinedSubscription = new HashSet<>(joinedSubscription);
        newSubscription.addAll(addedTopics);
        newJoinedSubscription.addAll(addedTopics);

        this.subscriptions.subscribeFromPattern(newSubscription);
        this.joinedSubscription = newJoinedSubscription;
    }

    // update the metadata and enforce a refresh to make sure the fetcher can start
    // fetching data in the next iteration
    this.metadata.setTopics(subscriptions.groupSubscription());
    client.ensureFreshMetadata();

    // give the assignor a chance to update internal state based on the received assignment
    assignor.onAssignment(assignment);

    // reschedule the auto commit starting from now
    this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs;

    // execute the user's callback after rebalance
    ConsumerRebalanceListener listener = subscriptions.listener();
    log.info("Setting newly assigned partitions {} for group {}", subscriptions.assignedPartitions(), groupId);
    try {
        Set<TopicPartition> assigned = new HashSet<>(subscriptions.assignedPartitions());
        listener.onPartitionsAssigned(assigned);
    } catch (WakeupException | InterruptException e) {
        throw e;
    } catch (Exception e) {
        log.error("User provided listener {} for group {} failed on partition assignment", listener.getClass().getName(), groupId, e);
    }
}
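The listener driven by onJoinPrepare and onJoinComplete is whatever ConsumerRebalanceListener the application registered when subscribing. A minimal sketch of such a listener, assuming application-level offset tracking in a hypothetical currentOffsets map and an illustrative topic name; note that a WakeupException thrown from either callback is rethrown by the coordinator, while any other exception is only logged:

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

// Illustrative: commit application-tracked offsets on revocation, rewind to them on assignment.
void subscribeWithListener(final KafkaConsumer<String, String> consumer,
                           final Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
    consumer.subscribe(Collections.singletonList("my-topic"), new ConsumerRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // invoked from onJoinPrepare(); commit what the application has processed so far
            consumer.commitSync(currentOffsets);
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // invoked from onJoinComplete(); resume from the application's own offsets if known
            for (TopicPartition partition : partitions) {
                OffsetAndMetadata offset = currentOffsets.get(partition);
                if (offset != null)
                    consumer.seek(partition, offset.offset());
            }
        }
    });
}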
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
From the class KafkaBasedLogTest, method testReloadOnStartWithNoNewRecordsPresent:
@Test
public void testReloadOnStartWithNoNewRecordsPresent() throws Exception {
    expectStart();
    expectStop();

    PowerMock.replayAll();

    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 7L);
    endOffsets.put(TP1, 7L);
    consumer.updateEndOffsets(endOffsets);
    // Better test with an advanced offset other than just 0L
    consumer.updateBeginningOffsets(endOffsets);

    consumer.schedulePollTask(new Runnable() {
        @Override
        public void run() {
            // Throw an exception that will not be ignored or handled by the Connect framework.
            // In reality a misplaced call to poll blocks indefinitely and Connect aborts due to
            // time outs (for instance via ConnectRestException)
            throw new WakeupException();
        }
    });

    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(7L, consumer.position(TP0));
    assertEquals(7L, consumer.position(TP1));

    store.stop();

    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
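The scheduled poll task throws WakeupException so that the log's background read thread is unblocked even though no new records arrive. A hypothetical sketch of the kind of read loop this pattern targets, where the exception is treated as a benign signal rather than an error (an illustration, not the actual KafkaBasedLog code; stopRequested and handle(...) are assumed names):

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.errors.WakeupException;

private volatile boolean stopRequested = false;    // hypothetical shutdown flag, set by stop()

void readLoop(Consumer<byte[], byte[]> consumer) {
    while (!stopRequested) {
        try {
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Long.MAX_VALUE);
            for (ConsumerRecord<byte[], byte[]> record : records)
                handle(record);                    // handle(...) is a placeholder for the record callback
        } catch (WakeupException e) {
            // woken up, e.g. by consumer.wakeup() or a scheduled MockConsumer poll task;
            // loop around and re-check stopRequested instead of treating this as an error
        }
    }
}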