Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Apache `kafka` project.
From class StreamsPartitionAssignorTest, method shouldRequestEndOffsetsForPreexistingChangelogs.
// Verifies that, during rebalance assignment, the assignor asks the admin client for the
// end offsets of changelog topics that already exist, and that the listOffsets request
// covers exactly the expected changelog partitions.
@Test
public void shouldRequestEndOffsetsForPreexistingChangelogs() {
// The three changelog partitions the assignor is expected to query.
final Set<TopicPartition> changelogs = mkSet(new TopicPartition(APPLICATION_ID + "-store-changelog", 0), new TopicPartition(APPLICATION_ID + "-store-changelog", 1), new TopicPartition(APPLICATION_ID + "-store-changelog", 2));
// Strict mock: verify(adminClient) below fails on any unexpected interaction.
adminClient = EasyMock.createMock(AdminClient.class);
final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
// Pre-complete the future so the assignor sees every changelog as fully caught up
// (each partition reports an end offset of Long.MAX_VALUE).
allFuture.complete(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> {
final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
expect(info.offset()).andStubReturn(Long.MAX_VALUE);
EasyMock.replay(info);
return info;
})));
// Capture the partitions actually passed to listOffsets for the final assertion.
final Capture<Map<TopicPartition, OffsetSpec>> capturedChangelogs = EasyMock.newCapture();
expect(adminClient.listOffsets(EasyMock.capture(capturedChangelogs))).andReturn(result).once();
expect(result.all()).andReturn(allFuture);
// Minimal topology with one stateful processor so a "-store-changelog" topic exists.
builder.addSource(null, "source1", null, null, null, "topic1");
builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor1");
subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
EasyMock.replay(result);
// NOTE(review): adminClient is not replayed here — presumably configureDefault()
// replays it; confirm against the test fixture setup.
configureDefault();
overwriteInternalTopicManagerWithMock(false);
// Trigger the assignment, which should issue the end-offset lookup.
partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
EasyMock.verify(adminClient);
// The lookup must have requested exactly the pre-existing changelog partitions.
assertThat(capturedChangelogs.getValue().keySet(), equalTo(changelogs));
}
Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Confluent `ksql` project.
From class KafkaEmbedded, method getEndOffsets.
/**
 * Returns the end offsets for a given collection of TopicPartitions.
 *
 * @param topicPartitions the partitions to look up end offsets for
 * @param isolationLevel the isolation level to use when reading end offsets
 * @return a map from each TopicPartition to its end offset
 * @throws RuntimeException wrapping any failure of the underlying admin request
 */
public Map<TopicPartition, Long> getEndOffsets(final Collection<TopicPartition> topicPartitions, final IsolationLevel isolationLevel) {
    // Request the latest (end) offset for every partition in a single admin call.
    final Map<TopicPartition, OffsetSpec> topicPartitionsWithSpec = topicPartitions.stream()
        .collect(Collectors.toMap(tp -> tp, tp -> OffsetSpec.latest()));
    try (AdminClient adminClient = adminClient()) {
        final ListOffsetsResult listOffsetsResult =
            adminClient.listOffsets(topicPartitionsWithSpec, new ListOffsetsOptions(isolationLevel));
        final Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> partitionResultMap =
            listOffsetsResult.all().get();
        return partitionResultMap.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
    } catch (final InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption
        // after we wrap it; the original broad catch silently cleared it.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (final Exception e) {
        // Preserve the original cause for diagnosis (embedded-cluster test helper).
        throw new RuntimeException(e);
    }
}
Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Confluent `ksql` project.
From class KsqlEngine, method getStartOffsetsForStreamPullQuery.
/**
 * Looks up the earliest (start) offset of every partition of the given topic, used to
 * bound a stream pull query.
 *
 * @param admin the admin client used to issue the listOffsets request
 * @param topicDescription the topic whose partitions are queried
 * @return an immutable map from each TopicPartition to its earliest offset
 * @throws KsqlServerException if the lookup is interrupted, fails, or times out
 */
private ImmutableMap<TopicPartition, Long> getStartOffsetsForStreamPullQuery(final Admin admin, final TopicDescription topicDescription) {
    // Ask for the earliest offset of every partition of the topic in one request.
    final Map<TopicPartition, OffsetSpec> topicPartitions = topicDescription.partitions().stream()
        .map(td -> new TopicPartition(topicDescription.name(), td.partition()))
        .collect(toMap(identity(), tp -> OffsetSpec.earliest()));
    // NOTE(review): the original comment here was truncated ("...so we should do the same
    // when checking end offsets") — presumably the pull query itself reads uncommitted
    // records, hence READ_UNCOMMITTED here as well; confirm against the query execution path.
    final ListOffsetsResult listOffsetsResult =
        admin.listOffsets(topicPartitions, new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
    try {
        // Bounded wait so a broker outage surfaces as a timeout instead of hanging the query.
        final Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> partitionResultMap =
            listOffsetsResult.all().get(10, TimeUnit.SECONDS);
        final Map<TopicPartition, Long> result = partitionResultMap.entrySet().stream()
            .collect(toMap(Entry::getKey, e -> e.getValue().offset()));
        return ImmutableMap.copyOf(result);
    } catch (final InterruptedException e) {
        log.error("Admin#listOffsets(" + topicDescription.name() + ") interrupted", e);
        // Restore the interrupt flag so the calling thread can still observe the
        // interruption; the original handler dropped it.
        Thread.currentThread().interrupt();
        throw new KsqlServerException("Interrupted");
    } catch (final ExecutionException e) {
        log.error("Error executing Admin#listOffsets(" + topicDescription.name() + ")", e);
        throw new KsqlServerException("Internal Server Error");
    } catch (final TimeoutException e) {
        log.error("Admin#listOffsets(" + topicDescription.name() + ") timed out", e);
        throw new KsqlServerException("Backend timed out");
    }
}
Aggregations