Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
The class MockConsumer, method poll.
@Override
public ConsumerRecords<K, V> poll(long timeout) {
    ensureNotClosed();

    // Synchronize around the entire execution so that new tasks to be triggered
    // on subsequent poll calls can be added in the callback
    synchronized (pollTasks) {
        Runnable task = pollTasks.poll();
        if (task != null)
            task.run();
    }

    if (wakeup.get()) {
        wakeup.set(false);
        throw new WakeupException();
    }

    if (exception != null) {
        // copy the field to a local and clear it, so the exception is thrown only once
        RuntimeException exception = this.exception;
        this.exception = null;
        throw exception;
    }

    // Handle seeks that need to wait for a poll() call to be processed
    for (TopicPartition tp : subscriptions.missingFetchPositions())
        updateFetchPosition(tp);

    // update the consumed offsets and collect the records to return
    final Map<TopicPartition, List<ConsumerRecord<K, V>>> results = new HashMap<>();
    for (final TopicPartition topicPartition : records.keySet()) {
        results.put(topicPartition, new ArrayList<ConsumerRecord<K, V>>());
    }

    for (Map.Entry<TopicPartition, List<ConsumerRecord<K, V>>> entry : this.records.entrySet()) {
        if (!subscriptions.isPaused(entry.getKey())) {
            final List<ConsumerRecord<K, V>> recs = entry.getValue();
            for (final ConsumerRecord<K, V> rec : recs) {
                if (assignment().contains(entry.getKey()) && rec.offset() >= subscriptions.position(entry.getKey())) {
                    results.get(entry.getKey()).add(rec);
                    subscriptions.position(entry.getKey(), rec.offset() + 1);
                }
            }
        }
    }
    this.records.clear();
    return new ConsumerRecords<>(results);
}
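
The wakeup check at the top of poll() is what makes MockConsumer useful for testing interruption: Consumer.wakeup() only sets a flag, and the WakeupException surfaces on the next poll() call. A minimal, self-contained sketch of that interaction (the class name is illustrative, not part of the snippet above):

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.errors.WakeupException;

public class MockConsumerWakeupSketch {
    public static void main(String[] args) {
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

        // wakeup() is safe to call from any thread; it only sets the internal flag
        consumer.wakeup();
        try {
            consumer.poll(100);
        } catch (WakeupException expected) {
            // the flag is cleared before the exception is thrown,
            // so a subsequent poll() proceeds normally
        }
        consumer.close();
    }
}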
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
The class KafkaBasedLogTest, method testReloadOnStartWithNoNewRecordsPresent.
@Test
public void testReloadOnStartWithNoNewRecordsPresent() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();

    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 7L);
    endOffsets.put(TP1, 7L);
    consumer.updateEndOffsets(endOffsets);
    // Better test with an advanced offset other than just 0L
    consumer.updateBeginningOffsets(endOffsets);

    consumer.schedulePollTask(new Runnable() {
        @Override
        public void run() {
            // Throw an exception that will not be ignored or handled by the Connect framework.
            // In reality a misplaced call to poll blocks indefinitely and Connect aborts due to
            // time outs (for instance via ConnectRestException)
            throw new WakeupException();
        }
    });

    store.start();

    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(7L, consumer.position(TP0));
    assertEquals(7L, consumer.position(TP1));

    store.stop();

    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
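
schedulePollTask() is the hook this test relies on: MockConsumer runs the scheduled Runnable at the start of the next poll(), before any records are returned, so a task that throws propagates straight out of poll(). A self-contained sketch of the same pattern in isolation (class name illustrative):

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.errors.WakeupException;

public class SchedulePollTaskSketch {
    public static void main(String[] args) {
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

        // the task runs at the top of the next poll(), so the exception
        // aborts exactly that one poll() call
        consumer.schedulePollTask(new Runnable() {
            @Override
            public void run() {
                throw new WakeupException();
            }
        });

        try {
            consumer.poll(0);
        } catch (WakeupException expected) {
            // the loop under test would exit (or retry) here
        }
        consumer.close();
    }
}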
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
The class AbstractTaskTest, method shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException.
@Test(expected = WakeupException.class)
public void shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException() throws Exception {
    final Consumer consumer = mockConsumer(new WakeupException());
    final AbstractTask task = createTask(consumer);
    task.initializeOffsetLimits();
}
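
mockConsumer() and createTask() are helpers local to AbstractTaskTest and are not shown in this snippet. One plausible shape for mockConsumer, offered here as an assumption rather than the verbatim helper, is a MockConsumer subclass whose committed() lookup throws the supplied exception, which is what initializeOffsetLimits() would hit when it reads committed offsets:

// Hypothetical reconstruction of the test's mockConsumer helper, not verbatim source:
// the overridden committed() fails with the injected exception.
private Consumer mockConsumer(final RuntimeException toThrow) {
    return new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}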
Use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
The class AbstractCoordinatorTest, method testWakeupAfterSyncGroupSent.
@Test
public void testWakeupAfterSyncGroupSent() throws Exception {
    mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE));
    mockClient.prepareResponse(new MockClient.RequestMatcher() {
        private int invocations = 0;

        @Override
        public boolean matches(AbstractRequest body) {
            invocations++;
            boolean isSyncGroupRequest = body instanceof SyncGroupRequest;
            if (isSyncGroupRequest && invocations == 1)
                // simulate wakeup after the request is sent
                throw new WakeupException();
            return isSyncGroupRequest;
        }
    }, syncGroupResponse(Errors.NONE));
    AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();

    try {
        coordinator.ensureActiveGroup();
        fail("Should have woken up from ensureActiveGroup()");
    } catch (WakeupException e) {
        // expected: the wakeup interrupts the first join attempt
    }

    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(0, coordinator.onJoinCompleteInvokes);
    assertFalse(heartbeatReceived.get());

    coordinator.ensureActiveGroup();

    assertEquals(1, coordinator.onJoinPrepareInvokes);
    assertEquals(1, coordinator.onJoinCompleteInvokes);

    awaitFirstHeartbeat(heartbeatReceived);
}
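
The two ensureActiveGroup() calls encode the contract the test verifies: a WakeupException raised after the SyncGroup request was sent does not lose the in-flight request, so retrying the call completes the join instead of starting the protocol over (hence onJoinPrepareInvokes stays at 1). In caller terms, the pattern is simply the following illustrative sketch:

try {
    coordinator.ensureActiveGroup();
} catch (WakeupException e) {
    // interrupted by wakeup(); the in-flight SyncGroup request is preserved,
    // so a retry picks up where the first attempt left off
    coordinator.ensureActiveGroup();
}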
Use of org.apache.kafka.common.errors.WakeupException in project flink by apache.
The class KafkaConsumerThread, method run.
// ------------------------------------------------------------------------

@Override
public void run() {
    // early exit check
    if (!running) {
        return;
    }

    // this is the means to talk to FlinkKafkaConsumer's main thread
    final Handover handover = this.handover;

    // This method initializes the KafkaConsumer and guarantees it is torn down properly.
    // This is important, because the consumer has multi-threading issues,
    // including concurrent 'close()' calls.
    final KafkaConsumer<byte[], byte[]> consumer;
    try {
        consumer = new KafkaConsumer<>(kafkaProperties);
    } catch (Throwable t) {
        handover.reportError(t);
        return;
    }

    // from here on, the consumer is guaranteed to be closed properly
    try {
        // The callback invoked by Kafka once an offset commit is complete
        final OffsetCommitCallback offsetCommitCallback = new CommitCallback();

        // tell the consumer which partitions to work with
        consumerCallBridge.assignPartitions(consumer, convertKafkaPartitions(subscribedPartitionStates));

        // register Kafka's very own metrics in Flink's metric reporters
        if (useMetrics) {
            // register Kafka metrics to Flink
            Map<MetricName, ? extends Metric> metrics = consumer.metrics();
            if (metrics == null) {
                // MapR's Kafka implementation returns null here.
                log.info("Consumer implementation does not support metrics");
            } else {
                // we have Kafka metrics, register them
                for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
                    kafkaMetricGroup.gauge(metric.getKey().name(), new KafkaMetricWrapper(metric.getValue()));
                }
            }
        }

        // early exit check
        if (!running) {
            return;
        }

        // offsets of some partitions may still be placeholder sentinel values;
        // replace those with actual offsets, according to what each sentinel value represents
        for (KafkaTopicPartitionState<TopicPartition> partition : subscribedPartitionStates) {
            if (partition.getOffset() == KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET) {
                consumerCallBridge.seekPartitionToBeginning(consumer, partition.getKafkaPartitionHandle());
                partition.setOffset(consumer.position(partition.getKafkaPartitionHandle()) - 1);
            } else if (partition.getOffset() == KafkaTopicPartitionStateSentinel.LATEST_OFFSET) {
                consumerCallBridge.seekPartitionToEnd(consumer, partition.getKafkaPartitionHandle());
                partition.setOffset(consumer.position(partition.getKafkaPartitionHandle()) - 1);
            } else if (partition.getOffset() == KafkaTopicPartitionStateSentinel.GROUP_OFFSET) {
                // the KafkaConsumer by default will automatically seek the consumer position
                // to the committed group offset, so we do not need to do it.
                partition.setOffset(consumer.position(partition.getKafkaPartitionHandle()) - 1);
            } else {
                consumer.seek(partition.getKafkaPartitionHandle(), partition.getOffset() + 1);
            }
        }

        // from now on, external operations may call the consumer
        this.consumer = consumer;

        // the latest bulk of records. may carry across the loop if the thread is woken up
        // from blocking on the handover
        ConsumerRecords<byte[], byte[]> records = null;

        // main fetch loop
        while (running) {

            // check if there is something to commit
            if (!commitInProgress) {
                // get and reset the work-to-be-committed, so we don't repeatedly commit the same
                final Map<TopicPartition, OffsetAndMetadata> toCommit = nextOffsetsToCommit.getAndSet(null);

                if (toCommit != null) {
                    log.debug("Sending async offset commit request to Kafka broker");

                    // also record that a commit is already in progress
                    // the order here matters! first set the flag, then send the commit command.
                    commitInProgress = true;
                    consumer.commitAsync(toCommit, offsetCommitCallback);
                }
            }

            // get the next batch of records, unless we did not manage to hand the old batch over
            if (records == null) {
                try {
                    records = consumer.poll(pollTimeout);
                } catch (WakeupException we) {
                    continue;
                }
            }

            try {
                handover.produce(records);
                records = null;
            } catch (Handover.WakeupException e) {
                // fall through the loop
            }
        }
        // end main fetch loop
    } catch (Throwable t) {
        // let the main thread know and exit
        // it may be that this exception comes because the main thread closed the handover, in
        // which case the below reporting is irrelevant, but does not hurt either
        handover.reportError(t);
    } finally {
        // make sure the handover is closed if it is not already closed or has an error
        handover.close();

        // make sure the KafkaConsumer is closed
        try {
            consumer.close();
        } catch (Throwable t) {
            log.warn("Error while closing Kafka consumer", t);
        }
    }
}
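
Both catch clauses in the fetch loop exist so that a shutdown signal can interrupt whichever call the thread happens to be blocked in: Kafka's WakeupException breaks out of consumer.poll(), while Handover.WakeupException breaks out of handover.produce(). A shutdown method for this thread would therefore look roughly like the sketch below; it is consistent with the fields used above but is an assumption, not verbatim Flink source:

// Hedged sketch of the corresponding shutdown path (assumed, not verbatim Flink code)
public void shutdown() {
    running = false;

    // wakes up the thread if it is blocked handing records over to the main thread
    handover.wakeupProducer();

    // wakes up the thread if it is blocked in consumer.poll();
    // 'this.consumer' is only set once the consumer may be called externally
    KafkaConsumer<byte[], byte[]> consumer = this.consumer;
    if (consumer != null) {
        consumer.wakeup();
    }
}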