Use of org.apache.kafka.clients.admin.ListOffsetsResult in project kafka by apache.
The class TopicAdmin, method endOffsets.
/**
 * Fetch the most recent offset for each of the supplied {@link TopicPartition} objects.
 *
 * @param partitions the topic partitions
 * @return the map of offsets for each topic partition, or an empty map if the supplied partitions
 *         are null or empty
 * @throws UnsupportedVersionException if the admin client cannot read end offsets
 * @throws TimeoutException if the offset metadata could not be fetched before the amount of time allocated
 *         by {@code request.timeout.ms} expires, and this call can be retried
 * @throws LeaderNotAvailableException if the leader was not available and this call can be retried
 * @throws RetriableException if a retriable error occurs, or the thread is interrupted while attempting
 *         to perform this operation
 * @throws ConnectException if a non-retriable error occurs
 */
public Map<TopicPartition, Long> endOffsets(Set<TopicPartition> partitions) {
    if (partitions == null || partitions.isEmpty()) {
        return Collections.emptyMap();
    }
    Map<TopicPartition, OffsetSpec> offsetSpecMap = partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest()));
    ListOffsetsResult resultFuture = admin.listOffsets(offsetSpecMap);
    // Get the individual result for each topic partition so we have better error messages
    Map<TopicPartition, Long> result = new HashMap<>();
    for (TopicPartition partition : partitions) {
        try {
            ListOffsetsResultInfo info = resultFuture.partitionResult(partition).get();
            result.put(partition, info.offset());
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            String topic = partition.topic();
            if (cause instanceof AuthorizationException) {
                String msg = String.format("Not authorized to get the end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new ConnectException(msg, e);
            } else if (cause instanceof UnsupportedVersionException) {
                // Should theoretically never happen, because this method is the same as what the consumer uses and therefore
                // should exist in the broker since before the admin client was added
                String msg = String.format("API to get the end offsets for topic '%s' is unsupported on brokers at %s", topic, bootstrapServers());
                throw new UnsupportedVersionException(msg, e);
            } else if (cause instanceof TimeoutException) {
                String msg = String.format("Timed out while waiting to get end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new TimeoutException(msg, e);
            } else if (cause instanceof LeaderNotAvailableException) {
                String msg = String.format("Unable to get end offsets during leader election for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new LeaderNotAvailableException(msg, e);
            } else if (cause instanceof org.apache.kafka.common.errors.RetriableException) {
                throw (org.apache.kafka.common.errors.RetriableException) cause;
            } else {
                String msg = String.format("Error while getting end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new ConnectException(msg, e);
            }
        } catch (InterruptedException e) {
            Thread.interrupted();
            String msg = String.format("Interrupted while attempting to read end offsets for topic '%s' on brokers at %s", partition.topic(), bootstrapServers());
            throw new RetriableException(msg, e);
        }
    }
    return result;
}
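For comparison, the same end-offset lookup can be issued directly against the Admin API, which is what endOffsets does before adding its error handling. A minimal sketch, assuming an Admin instance named admin and an illustrative topic and partition (neither is part of the snippet above); note that partitionResult(...).get() blocks and can throw ExecutionException or InterruptedException:

TopicPartition tp = new TopicPartition("my-topic", 0);  // hypothetical topic and partition
ListOffsetsResult result = admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()));
ListOffsetsResultInfo info = result.partitionResult(tp).get();  // blocks until the broker responds
long endOffset = info.offset();  // the next offset to be written to this partition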
Use of org.apache.kafka.clients.admin.ListOffsetsResult in project kafka by apache.
The class KafkaStreamsTest, method shouldReturnEmptyLocalStorePartitionLags.
@Test
public void shouldReturnEmptyLocalStorePartitionLags() {
    // Mock all calls made to compute the offset lags.
    final ListOffsetsResult result = EasyMock.mock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(Collections.emptyMap());
    EasyMock.expect(result.all()).andReturn(allFuture);
    final MockAdminClient mockAdminClient = EasyMock.partialMockBuilder(MockAdminClient.class).addMockedMethod("listOffsets", Map.class).createMock();
    EasyMock.expect(mockAdminClient.listOffsets(anyObject())).andStubReturn(result);
    final MockClientSupplier mockClientSupplier = EasyMock.partialMockBuilder(MockClientSupplier.class).addMockedMethod("getAdmin").createMock();
    EasyMock.expect(mockClientSupplier.getAdmin(anyObject())).andReturn(mockAdminClient);
    EasyMock.replay(result, mockAdminClient, mockClientSupplier);
    try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, mockClientSupplier, time)) {
        streams.start();
        assertEquals(0, streams.allLocalStorePartitionLags().size());
    }
}
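The essential idea is that ListOffsetsResult.all() is stubbed to return an already completed future, so the lag computation never touches a real broker. Projects that use Mockito rather than EasyMock can express the same stubbing as follows; this is a sketch under that assumption, not code from the Kafka test suite:

ListOffsetsResult result = Mockito.mock(ListOffsetsResult.class);
KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
allFuture.complete(Collections.emptyMap());  // no partitions, so no lags to report
Mockito.when(result.all()).thenReturn(allFuture);
Admin adminClient = Mockito.mock(AdminClient.class);
Mockito.when(adminClient.listOffsets(Mockito.anyMap())).thenReturn(result);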
Use of org.apache.kafka.clients.admin.ListOffsetsResult in project kafka by apache.
The class AssignmentTestUtils, method createMockAdminClientForAssignor.
// If you don't care about setting the end offsets for each specific topic partition, the helper method
// getTopicPartitionOffsetMap is useful for building this input map for all partitions
public static AdminClient createMockAdminClientForAssignor(final Map<TopicPartition, Long> changelogEndOffsets) {
    final AdminClient adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogEndOffsets.entrySet().stream().collect(Collectors.toMap(Entry::getKey, t -> {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(t.getValue());
        EasyMock.replay(info);
        return info;
    })));
    expect(adminClient.listOffsets(anyObject())).andStubReturn(result);
    expect(result.all()).andStubReturn(allFuture);
    EasyMock.replay(result);
    return adminClient;
}
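A caller only needs to supply a map from changelog partitions to their end offsets; a minimal hand-built example (the topic name and offsets here are illustrative, not taken from the test utilities):

final Map<TopicPartition, Long> changelogEndOffsets = new HashMap<>();
changelogEndOffsets.put(new TopicPartition("app-store-changelog", 0), 100L);
changelogEndOffsets.put(new TopicPartition("app-store-changelog", 1), 250L);
final AdminClient adminClient = createMockAdminClientForAssignor(changelogEndOffsets);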
Use of org.apache.kafka.clients.admin.ListOffsetsResult in project kafka by apache.
The class ClientUtilsTest, method fetchEndOffsetsShouldRethrowInterruptedExceptionAsStreamsException.
@Test
public void fetchEndOffsetsShouldRethrowInterruptedExceptionAsStreamsException() throws Exception {
    final Admin adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = EasyMock.createMock(KafkaFuture.class);
    EasyMock.expect(adminClient.listOffsets(EasyMock.anyObject())).andStubReturn(result);
    EasyMock.expect(result.all()).andStubReturn(allFuture);
    EasyMock.expect(allFuture.get()).andThrow(new InterruptedException());
    replay(adminClient, result, allFuture);
    assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, adminClient));
    verify(adminClient);
}
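Both ClientUtilsTest cases rely on fetchEndOffsets wrapping any failure from the underlying future in a StreamsException. A sketch of that wrapping, written under that assumption and not reproducing the actual ClientUtils implementation:

static Map<TopicPartition, ListOffsetsResultInfo> fetchEndOffsetsSketch(final Collection<TopicPartition> partitions, final Admin adminClient) {
    final Map<TopicPartition, OffsetSpec> spec = partitions.stream().collect(Collectors.toMap(tp -> tp, tp -> OffsetSpec.latest()));
    try {
        // blocks; InterruptedException, ExecutionException, or a RuntimeException can surface here
        return adminClient.listOffsets(spec).all().get();
    } catch (final Exception e) {
        throw new StreamsException("Unable to fetch end offsets for " + partitions, e);
    }
}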
Use of org.apache.kafka.clients.admin.ListOffsetsResult in project kafka by apache.
The class ClientUtilsTest, method fetchEndOffsetsShouldRethrowRuntimeExceptionAsStreamsException.
@Test
public void fetchEndOffsetsShouldRethrowRuntimeExceptionAsStreamsException() throws Exception {
    final Admin adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = EasyMock.createMock(KafkaFuture.class);
    EasyMock.expect(adminClient.listOffsets(EasyMock.anyObject())).andStubReturn(result);
    EasyMock.expect(result.all()).andStubReturn(allFuture);
    EasyMock.expect(allFuture.get()).andThrow(new RuntimeException());
    replay(adminClient, result, allFuture);
    assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, adminClient));
    verify(adminClient);
}
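Either failure mode reaches the caller the same way. A hedged usage sketch, where PARTITIONS and adminClient are assumed to exist as in the tests above:

try {
    final Map<TopicPartition, ListOffsetsResultInfo> endOffsets = fetchEndOffsets(PARTITIONS, adminClient);
    endOffsets.forEach((tp, info) -> System.out.printf("%s end offset = %d%n", tp, info.offset()));
} catch (final StreamsException e) {
    // both the interrupted and the runtime failure cases exercised above arrive here
}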