Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project flink by apache.
The class Kafka09FetcherTest, method ensureOffsetsGetCommitted.
@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);

    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);

    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);

    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();

    // ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls ----

    final MultiShotLatch blockerLatch = new MultiShotLatch();

    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {
        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });

    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();

    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked")
            Map<TopicPartition, OffsetAndMetadata> offsets =
                (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];

            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];

            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(
        Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));

    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);

    // ----- create the test fetcher -----

    @SuppressWarnings("unchecked")
    SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets =
        Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());

    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
        sourceContext,
        partitionsWithInitialOffsets,
        null, /* periodic watermark extractor */
        null, /* punctuated watermark extractor */
        new TestProcessingTimeService(),
        10, /* watermark interval */
        this.getClass().getClassLoader(),
        "task_name",
        new UnregisteredMetricsGroup(),
        schema,
        new Properties(),
        0L,
        false);

    // ----- run the fetcher -----

    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {
        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();

    // ----- trigger the first offset commit -----

    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();

    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            // the committed offset must be the last processed offset + 1
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(19L, entry.getValue().offset());
        }
    }

    // ----- trigger the second offset commit -----

    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();

    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(29L, entry.getValue().offset());
        }
    }

    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();

    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
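The Mockito answer above mirrors the real commitAsync contract: on success the consumer invokes OffsetCommitCallback.onComplete with the committed offsets and a null exception, on failure with a non-null one. Below is a minimal standalone sketch of that contract against a plain KafkaConsumer; the names CommitSketch and commitWithLogging are made up for illustration.

import java.util.Map;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

public class CommitSketch {

    // Fire-and-forget commit: commitAsync does not block the poll loop; the
    // broker's answer arrives later via onComplete, invoked from a subsequent
    // call into the consumer on the same thread.
    static void commitWithLogging(KafkaConsumer<?, ?> consumer,
                                  Map<TopicPartition, OffsetAndMetadata> offsets) {
        consumer.commitAsync(offsets, new OffsetCommitCallback() {
            @Override
            public void onComplete(Map<TopicPartition, OffsetAndMetadata> committed, Exception exception) {
                if (exception != null) {
                    System.err.println("Async commit failed for " + committed + ": " + exception);
                } else {
                    System.out.println("Async commit succeeded for " + committed);
                }
            }
        });
    }
}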
Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project kafka by apache.
The class WorkerSinkTaskTest, method testPreCommit.
@Test
public void testPreCommit() throws Exception {
    expectInitializeTask();

    // iter 1
    expectPollInitialAssignment();

    // iter 2
    expectConsumerPoll(2);
    expectConversionAndTransformation(2);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();

    final Map<TopicPartition, OffsetAndMetadata> workerStartingOffsets = new HashMap<>();
    workerStartingOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET));
    workerStartingOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));

    final Map<TopicPartition, OffsetAndMetadata> workerCurrentOffsets = new HashMap<>();
    workerCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 2));
    workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));

    final Map<TopicPartition, OffsetAndMetadata> taskOffsets = new HashMap<>();
    // act like FIRST_OFFSET+2 has not yet been flushed by the task
    taskOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    // should be ignored because > current offset
    taskOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET + 1));
    // should be ignored because this partition is not assigned
    taskOffsets.put(new TopicPartition(TOPIC, 3), new OffsetAndMetadata(FIRST_OFFSET));

    final Map<TopicPartition, OffsetAndMetadata> committableOffsets = new HashMap<>();
    committableOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    committableOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));

    sinkTask.preCommit(workerCurrentOffsets);
    EasyMock.expectLastCall().andReturn(taskOffsets);

    final Capture<OffsetCommitCallback> callback = EasyMock.newCapture();
    consumer.commitAsync(EasyMock.eq(committableOffsets), EasyMock.capture(callback));
    EasyMock.expectLastCall().andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
            callback.getValue().onComplete(committableOffsets, null);
            return null;
        }
    });
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();

    PowerMock.replayAll();

    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();

    // iter 1 -- initial assignment
    workerTask.iteration();
    assertEquals(workerStartingOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "currentOffsets"));

    // iter 2 -- deliver 2 records
    workerTask.iteration();
    assertEquals(workerCurrentOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "currentOffsets"));
    assertEquals(workerStartingOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "lastCommittedOffsets"));
    sinkTaskContext.getValue().requestCommit();

    // iter 3 -- commit
    workerTask.iteration();
    assertEquals(committableOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "lastCommittedOffsets"));

    PowerMock.verifyAll();
}
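This test pins down the preCommit contract: the worker hands the task its current consumed offsets, the task returns the subset that is actually safe to commit, and the worker drops anything above the consumed position or on an unassigned partition (hence the committableOffsets map). A minimal sketch of a SinkTask using that hook follows; BufferingSinkTask and its flushed map are hypothetical, not part of the test above.

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class BufferingSinkTask extends SinkTask {

    // Offsets that have actually been flushed to the external system.
    private final Map<TopicPartition, OffsetAndMetadata> flushed = new HashMap<>();

    @Override
    public void start(Map<String, String> props) {
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            // ... write the record to the external system, then track it as
            // flushed; the committed offset is the next offset to consume.
            flushed.put(new TopicPartition(record.topic(), record.kafkaPartition()),
                new OffsetAndMetadata(record.kafkaOffset() + 1));
        }
    }

    @Override
    public Map<TopicPartition, OffsetAndMetadata> preCommit(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
        // Report only what has really been flushed; the framework commits
        // these instead of the raw consumed offsets.
        return new HashMap<>(flushed);
    }

    @Override
    public void stop() {
    }

    @Override
    public String version() {
        return "test";
    }
}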
Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project kafka by apache.
The class WorkerSinkTaskThreadedTest, method expectOffsetCommit.
private Capture<OffsetCommitCallback> expectOffsetCommit(final long expectedMessages,
                                                         final RuntimeException error,
                                                         final Exception consumerCommitError,
                                                         final long consumerCommitDelayMs,
                                                         final boolean invokeCallback) throws Exception {
    final long finalOffset = FIRST_OFFSET + expectedMessages;

    // All assigned partitions will have offsets committed, but we've only processed messages/updated offsets for one
    final Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
    offsetsToCommit.put(TOPIC_PARTITION, new OffsetAndMetadata(finalOffset));
    offsetsToCommit.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    offsetsToCommit.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET));

    sinkTask.preCommit(offsetsToCommit);
    IExpectationSetters<Object> expectation = PowerMock.expectLastCall();
    if (error != null) {
        expectation.andThrow(error).once();
        return null;
    } else {
        expectation.andReturn(offsetsToCommit);
    }

    final Capture<OffsetCommitCallback> capturedCallback = EasyMock.newCapture();
    consumer.commitAsync(EasyMock.eq(offsetsToCommit), EasyMock.capture(capturedCallback));
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            time.sleep(consumerCommitDelayMs);
            if (invokeCallback)
                capturedCallback.getValue().onComplete(offsetsToCommit, consumerCommitError);
            return null;
        }
    });
    return capturedCallback;
}
Use of org.apache.kafka.clients.consumer.OffsetCommitCallback in project kafka by apache.
The class ConsumerCoordinator, method doCommitOffsetsAsync.
private void doCommitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) {
    this.subscriptions.needRefreshCommits();
    RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
    final OffsetCommitCallback cb = callback == null ? defaultOffsetCommitCallback : callback;
    future.addListener(new RequestFutureListener<Void>() {
        @Override
        public void onSuccess(Void value) {
            if (interceptors != null)
                interceptors.onCommit(offsets);
            completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, null));
        }

        @Override
        public void onFailure(RuntimeException e) {
            Exception commitException = e;
            if (e instanceof RetriableException)
                commitException = new RetriableCommitFailedException(e);
            completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, commitException));
        }
    });
}
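Note that the callback is not invoked inline: completions are queued in completedOffsetCommits and drained later from the consumer's poll loop, and retriable failures are wrapped in RetriableCommitFailedException. That wrapping lets a user-supplied callback separate transient from fatal commit errors, as in this hypothetical sketch (RetryAwareCommitCallback is an illustrative name, not part of the Kafka client):

import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.clients.consumer.RetriableCommitFailedException;
import org.apache.kafka.common.TopicPartition;

public class RetryAwareCommitCallback implements OffsetCommitCallback {

    // First non-retriable failure, kept for the poll loop to inspect.
    private final AtomicReference<Exception> fatalError = new AtomicReference<>();

    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception == null) {
            return; // commit succeeded
        }
        if (exception instanceof RetriableCommitFailedException) {
            // Transient failure: a later async commit carries newer offsets and
            // supersedes this one, so it is usually safe to log and move on.
            return;
        }
        // Non-retriable: remember it so the application can fail fast.
        fatalError.compareAndSet(null, exception);
    }

    public Exception fatalError() {
        return fatalError.get();
    }
}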