
Example 1 with ListOffsetsOptions

Use of org.apache.kafka.clients.admin.ListOffsetsOptions in project kafka by apache.

From the class StoreChangelogReaderTest, the method shouldThrowIfEndOffsetsFail:

@Test
public void shouldThrowIfEndOffsetsFail() {
    EasyMock.expect(storeMetadata.offset()).andReturn(10L).anyTimes();
    EasyMock.replay(activeStateManager, storeMetadata, store);
    final MockAdminClient adminClient = new MockAdminClient() {

        @Override
        public ListOffsetsResult listOffsets(final Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                             final ListOffsetsOptions options) {
            // kaboom is an exception field defined elsewhere in the enclosing test class.
            throw kaboom;
        }
    };
    adminClient.updateEndOffsets(Collections.singletonMap(tp, 0L));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.register(tp, activeStateManager);
    final StreamsException thrown = assertThrows(StreamsException.class, () -> changelogReader.restore(Collections.emptyMap()));
    assertEquals(kaboom, thrown.getCause());
}
Also used: ListOffsetsOptions (org.apache.kafka.clients.admin.ListOffsetsOptions), StreamsException (org.apache.kafka.streams.errors.StreamsException), MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient), Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap), Map (java.util.Map), Collections.singletonMap (java.util.Collections.singletonMap), Test (org.junit.Test)
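
For contrast with the failure injection above, here is a minimal sketch of the happy path through the same API. It assumes MockAdminClient (from the kafka-clients test artifact) serves OffsetSpec.latest() from the offsets registered via updateEndOffsets, as the tests on this page rely on; the topic name and offset value are illustrative.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.admin.ListOffsetsOptions;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.MockAdminClient;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsHappyPathSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical partition; MockAdminClient answers from an in-memory offset map.
        TopicPartition tp = new TopicPartition("my-topic", 0);
        try (MockAdminClient adminClient = new MockAdminClient()) {
            adminClient.updateEndOffsets(Collections.singletonMap(tp, 42L));
            ListOffsetsResult result = adminClient.listOffsets(
                Collections.singletonMap(tp, OffsetSpec.latest()),
                new ListOffsetsOptions());
            Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> infos = result.all().get();
            System.out.println(infos.get(tp).offset()); // prints 42
        }
    }
}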

Example 2 with ListOffsetsOptions

Use of org.apache.kafka.clients.admin.ListOffsetsOptions in project kafka by apache.

From the class GetListOffsetsCallsBenchmark, the method setup:

@Setup(Level.Trial)
public void setup() {
    MetadataResponseData data = new MetadataResponseData();
    List<MetadataResponseTopic> mrTopicList = new ArrayList<>();
    Set<String> topics = new HashSet<>();
    for (int topicIndex = 0; topicIndex < topicCount; topicIndex++) {
        Uuid topicId = Uuid.randomUuid();
        String topicName = "topic-" + topicIndex;
        MetadataResponseTopic mrTopic = new MetadataResponseTopic()
            .setTopicId(topicId)
            .setName(topicName)
            .setErrorCode((short) 0)
            .setIsInternal(false);
        List<MetadataResponsePartition> mrPartitionList = new ArrayList<>();
        for (int partition = 0; partition < partitionCount; partition++) {
            TopicPartition tp = new TopicPartition(topicName, partition);
            topics.add(tp.topic());
            futures.put(tp, new KafkaFutureImpl<>());
            topicPartitionOffsets.put(tp, OffsetSpec.latest());
            MetadataResponsePartition mrPartition = new MetadataResponsePartition()
                .setLeaderId(partition % numNodes)
                .setPartitionIndex(partition)
                .setIsrNodes(Arrays.asList(0, 1, 2))
                .setReplicaNodes(Arrays.asList(0, 1, 2))
                .setOfflineReplicas(Collections.emptyList())
                .setErrorCode((short) 0);
            mrPartitionList.add(mrPartition);
        }
        mrTopic.setPartitions(mrPartitionList);
        mrTopicList.add(mrTopic);
    }
    data.setTopics(new MetadataResponseData.MetadataResponseTopicCollection(mrTopicList.listIterator()));
    long deadline = 0L;
    short version = 0;
    context = new MetadataOperationContext<>(topics, new ListOffsetsOptions(), deadline, futures);
    context.setResponse(Optional.of(new MetadataResponse(data, version)));
    AdminClientUnitTestEnv adminEnv = new AdminClientUnitTestEnv(mockCluster());
    admin = (KafkaAdminClient) adminEnv.adminClient();
}
Also used: MetadataResponseTopic (org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic), AdminClientUnitTestEnv (org.apache.kafka.clients.admin.AdminClientUnitTestEnv), ListOffsetsOptions (org.apache.kafka.clients.admin.ListOffsetsOptions), ArrayList (java.util.ArrayList), MetadataResponsePartition (org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition), Uuid (org.apache.kafka.common.Uuid), TopicPartition (org.apache.kafka.common.TopicPartition), MetadataResponseData (org.apache.kafka.common.message.MetadataResponseData), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), HashSet (java.util.HashSet), Setup (org.openjdk.jmh.annotations.Setup)
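
The benchmark setup requests OffsetSpec.latest() for every partition, but the spec can vary per partition within a single listOffsets call. A brief sketch of the other factory methods on OffsetSpec; the topic name, partition numbers, and timestamp are illustrative.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class OffsetSpecSketch {
    static Map<TopicPartition, OffsetSpec> buildSpecs() {
        Map<TopicPartition, OffsetSpec> specs = new HashMap<>();
        // Log end offset (the value the benchmark above asks for everywhere).
        specs.put(new TopicPartition("topic-0", 0), OffsetSpec.latest());
        // Log start offset.
        specs.put(new TopicPartition("topic-0", 1), OffsetSpec.earliest());
        // Earliest offset whose record timestamp is >= the given timestamp.
        specs.put(new TopicPartition("topic-0", 2), OffsetSpec.forTimestamp(1_600_000_000_000L));
        return specs;
    }
}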

Example 3 with ListOffsetsOptions

Use of org.apache.kafka.clients.admin.ListOffsetsOptions in project kafka by apache.

From the class StoreChangelogReader, the method endOffsetForChangelogs:

private Map<TopicPartition, Long> endOffsetForChangelogs(final Map<TaskId, Task> tasks, final Set<TopicPartition> partitions) {
    if (partitions.isEmpty()) {
        return Collections.emptyMap();
    }
    try {
        final ListOffsetsResult result = adminClient.listOffsets(
            partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest())),
            new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
        final Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> resultPerPartition = result.all().get();
        clearTaskTimeout(getTasksFromPartitions(tasks, partitions));
        return resultPerPartition.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().offset()));
    } catch (final TimeoutException | InterruptedException | ExecutionException retriableException) {
        log.debug("Could not fetch all end offsets for {}, will retry in the next run loop", partitions);
        maybeInitTaskTimeoutOrThrow(getTasksFromPartitions(tasks, partitions), retriableException);
        return Collections.emptyMap();
    } catch (final KafkaException e) {
        throw new StreamsException(String.format("Failed to retrieve end offsets for %s", partitions), e);
    }
}
Also used: StreamsConfig (org.apache.kafka.streams.StreamsConfig), Arrays (java.util.Arrays), TaskId (org.apache.kafka.streams.processor.TaskId), KafkaException (org.apache.kafka.common.KafkaException), HashMap (java.util.HashMap), StreamsException (org.apache.kafka.streams.errors.StreamsException), ClientUtils.fetchCommittedOffsets (org.apache.kafka.streams.processor.internals.ClientUtils.fetchCommittedOffsets), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), Function (java.util.function.Function), ArrayList (java.util.ArrayList), HashSet (java.util.HashSet), ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult), LogContext (org.apache.kafka.common.utils.LogContext), Duration (java.time.Duration), Map (java.util.Map), Admin (org.apache.kafka.clients.admin.Admin), TaskCorruptedException (org.apache.kafka.streams.errors.TaskCorruptedException), Consumer (org.apache.kafka.clients.consumer.Consumer), TopicPartition (org.apache.kafka.common.TopicPartition), StateStoreMetadata (org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata), TimeoutException (org.apache.kafka.common.errors.TimeoutException), Logger (org.slf4j.Logger), Time (org.apache.kafka.common.utils.Time), Collection (java.util.Collection), Set (java.util.Set), StateRestoreListener (org.apache.kafka.streams.processor.StateRestoreListener), Collectors (java.util.stream.Collectors), OffsetSpec (org.apache.kafka.clients.admin.OffsetSpec), ExecutionException (java.util.concurrent.ExecutionException), IsolationLevel (org.apache.kafka.common.IsolationLevel), List (java.util.List), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), InvalidOffsetException (org.apache.kafka.clients.consumer.InvalidOffsetException), ListOffsetsOptions (org.apache.kafka.clients.admin.ListOffsetsOptions), Collections (java.util.Collections)
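
Outside of Streams, the same pattern works against a real cluster with a plain Admin client. A minimal standalone sketch, assuming a broker at localhost:9092 and a topic named my-changelog (both illustrative, not from the source above).

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsOptions;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.TopicPartition;

public class EndOffsetSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("my-changelog", 0); // hypothetical partition
            // READ_UNCOMMITTED matches the changelog reader above: it wants the
            // log end offset, not the last stable offset.
            ListOffsetsResult result = admin.listOffsets(
                Collections.singletonMap(tp, OffsetSpec.latest()),
                new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
            long endOffset = result.partitionResult(tp).get().offset();
            System.out.println("end offset of " + tp + ": " + endOffset);
        }
    }
}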

Example 4 with ListOffsetsOptions

Use of org.apache.kafka.clients.admin.ListOffsetsOptions in project kafka by apache.

From the class StoreChangelogReaderTest, the method shouldRequestEndOffsetsAndHandleTimeoutException:

@Test
public void shouldRequestEndOffsetsAndHandleTimeoutException() {
    final TaskId taskId = new TaskId(0, 0);
    final Task mockTask = mock(Task.class);
    mockTask.maybeInitTaskTimeoutOrThrow(anyLong(), anyObject());
    EasyMock.expectLastCall();
    EasyMock.expect(storeMetadata.offset()).andReturn(5L).anyTimes();
    EasyMock.expect(activeStateManager.changelogOffsets()).andReturn(singletonMap(tp, 5L));
    EasyMock.expect(activeStateManager.taskId()).andReturn(taskId).anyTimes();
    EasyMock.replay(mockTask, activeStateManager, storeMetadata, store);
    final AtomicBoolean functionCalled = new AtomicBoolean(false);
    final MockAdminClient adminClient = new MockAdminClient() {

        @Override
        public ListOffsetsResult listOffsets(final Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                             final ListOffsetsOptions options) {
            if (functionCalled.get()) {
                return super.listOffsets(topicPartitionOffsets, options);
            } else {
                functionCalled.set(true);
                throw new TimeoutException("KABOOM!");
            }
        }
    };
    adminClient.updateEndOffsets(Collections.singletonMap(tp, 10L));
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            throw new AssertionError("Should not trigger this function");
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.register(tp, activeStateManager);
    changelogReader.restore(Collections.singletonMap(taskId, mockTask));
    assertEquals(StoreChangelogReader.ChangelogState.REGISTERED, changelogReader.changelogMetadata(tp).state());
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertTrue(functionCalled.get());
    verify(mockTask);
    EasyMock.resetToDefault(mockTask);
    mockTask.clearTaskTimeout();
    EasyMock.replay(mockTask);
    changelogReader.restore(Collections.singletonMap(taskId, mockTask));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, consumer.position(tp));
    verify(mockTask);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet), Set (java.util.Set), ListOffsetsOptions (org.apache.kafka.clients.admin.ListOffsetsOptions), MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap), Map (java.util.Map), Collections.singletonMap (java.util.Collections.singletonMap), TimeoutException (org.apache.kafka.common.errors.TimeoutException), Test (org.junit.Test)
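
The transient-failure handling this test verifies can be sketched independently of Streams: treat a TimeoutException from listOffsets as retriable and leave state untouched until the next attempt. A minimal sketch; the helper name and the null-means-retry convention are illustrative, not part of the Kafka API.

import java.util.Collections;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsOptions;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;

public class RetriableEndOffsetSketch {
    // Returns the latest offset, or null when the broker timed out and the
    // caller should simply try again on its next run loop, as the changelog
    // reader above does.
    static Long tryGetEndOffset(Admin admin, TopicPartition tp) throws ExecutionException, InterruptedException {
        try {
            return admin.listOffsets(
                    Collections.singletonMap(tp, OffsetSpec.latest()),
                    new ListOffsetsOptions())
                .partitionResult(tp).get().offset();
        } catch (TimeoutException transientError) {
            // Catches a synchronously thrown timeout (as the MockAdminClient
            // override above does); a timeout surfaced through the future
            // arrives wrapped in an ExecutionException instead.
            return null;
        }
    }
}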

Aggregations

ListOffsetsOptions (org.apache.kafka.clients.admin.ListOffsetsOptions): 4 uses
Map (java.util.Map): 3 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 3 uses
ArrayList (java.util.ArrayList): 2 uses
Collections.singletonMap (java.util.Collections.singletonMap): 2 uses
HashSet (java.util.HashSet): 2 uses
Set (java.util.Set): 2 uses
MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient): 2 uses
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 2 uses
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 2 uses
TaskId (org.apache.kafka.streams.processor.TaskId): 2 uses
Duration (java.time.Duration): 1 use
Arrays (java.util.Arrays): 1 use
Collection (java.util.Collection): 1 use
Collections (java.util.Collections): 1 use
HashMap (java.util.HashMap): 1 use
List (java.util.List): 1 use
ExecutionException (java.util.concurrent.ExecutionException): 1 use
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1 use
Function (java.util.function.Function): 1 use