Search in sources :

Example 6 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Apache Kafka project.

From the class ClientUtilsTest, method fetchEndOffsetsShouldRethrowExecutionExceptionAsStreamsException.

@Test
public void fetchEndOffsetsShouldRethrowExecutionExceptionAsStreamsException() throws Exception {
    // Stub an admin client whose listOffsets future fails with an ExecutionException.
    final Admin admin = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult offsetsResult = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> offsetsFuture = EasyMock.createMock(KafkaFuture.class);

    EasyMock.expect(admin.listOffsets(EasyMock.anyObject())).andStubReturn(offsetsResult);
    EasyMock.expect(offsetsResult.all()).andStubReturn(offsetsFuture);
    EasyMock.expect(offsetsFuture.get()).andThrow(new ExecutionException(new RuntimeException()));
    replay(admin, offsetsResult, offsetsFuture);

    // fetchEndOffsets must translate the ExecutionException into a StreamsException.
    assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, admin));
    verify(admin);
}
Also used : ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) Admin(org.apache.kafka.clients.admin.Admin) ExecutionException(java.util.concurrent.ExecutionException) Map(java.util.Map) Test(org.junit.Test)

Example 7 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Confluent ksqlDB project.

From the class KsqlEngine, method getEndOffsetsForStreamPullQuery.

/**
 * Fetches the latest (end) offset for every partition of the given topic, keeping only
 * partitions that contain data ({@code offset > 0}) past the query's start offsets.
 *
 * @param admin the admin client used to issue the {@code listOffsets} request
 * @param topicDescription description of the topic backing the stream pull query
 * @return end offsets per partition, restricted to partitions the query still needs to read
 * @throws KsqlServerException if the offsets request is interrupted, fails, or times out
 */
private ImmutableMap<TopicPartition, Long> getEndOffsetsForStreamPullQuery(final Admin admin, final TopicDescription topicDescription) {
    // Request the latest offset for each partition of the topic.
    final Map<TopicPartition, OffsetSpec> topicPartitions = topicDescription.partitions().stream()
        .map(td -> new TopicPartition(topicDescription.name(), td.partition()))
        .collect(toMap(identity(), tp -> OffsetSpec.latest()));
    // NOTE(review): the query's consumer presumably reads with READ_UNCOMMITTED isolation,
    // so we should do the same when checking end offsets — confirm against the consumer config.
    final ListOffsetsResult listOffsetsResult = admin.listOffsets(topicPartitions,
        new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
    final ImmutableMap<TopicPartition, Long> startOffsetsForStreamPullQuery = getStartOffsetsForStreamPullQuery(admin, topicDescription);
    try {
        final Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> partitionResultMap =
            listOffsetsResult.all().get(10, TimeUnit.SECONDS);
        // Keep only partitions with data whose end offset is beyond where the query starts.
        final Map<TopicPartition, Long> result = partitionResultMap.entrySet().stream()
            .filter(e -> e.getValue().offset() > 0L
                && e.getValue().offset() > startOffsetsForStreamPullQuery.get(e.getKey()))
            .collect(toMap(Entry::getKey, e -> e.getValue().offset()));
        return ImmutableMap.copyOf(result);
    } catch (final InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can observe it.
        Thread.currentThread().interrupt();
        log.error("Admin#listOffsets(" + topicDescription.name() + ") interrupted", e);
        throw new KsqlServerException("Interrupted");
    } catch (final ExecutionException e) {
        log.error("Error executing Admin#listOffsets(" + topicDescription.name() + ")", e);
        throw new KsqlServerException("Internal Server Error");
    } catch (final TimeoutException e) {
        log.error("Admin#listOffsets(" + topicDescription.name() + ") timed out", e);
        throw new KsqlServerException("Backend timed out");
    }
}
Also used : Query(io.confluent.ksql.parser.tree.Query) UnaryOperator.identity(java.util.function.UnaryOperator.identity) SourceName(io.confluent.ksql.name.SourceName) ServiceContext(io.confluent.ksql.services.ServiceContext) LoggerFactory(org.slf4j.LoggerFactory) RoutingOptions(io.confluent.ksql.execution.streams.RoutingOptions) TimeoutException(java.util.concurrent.TimeoutException) ProcessingLogContext(io.confluent.ksql.logging.processing.ProcessingLogContext) MutableMetaStore(io.confluent.ksql.metastore.MutableMetaStore) Context(io.vertx.core.Context) TransientQueryMetadata(io.confluent.ksql.util.TransientQueryMetadata) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TransientQueryCleanupListener(io.confluent.ksql.internal.TransientQueryCleanupListener) RewrittenAnalysis(io.confluent.ksql.analyzer.RewrittenAnalysis) Collectors.toMap(java.util.stream.Collectors.toMap) Map(java.util.Map) QueryLogger(io.confluent.ksql.logging.query.QueryLogger) QueryId(io.confluent.ksql.query.QueryId) PersistentQueryMetadata(io.confluent.ksql.util.PersistentQueryMetadata) QueryMetadata(io.confluent.ksql.util.QueryMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) ImmutableMap(com.google.common.collect.ImmutableMap) FunctionRegistry(io.confluent.ksql.function.FunctionRegistry) ScalablePushQueryMetadata(io.confluent.ksql.util.ScalablePushQueryMetadata) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ScalablePushQueryMetrics(io.confluent.ksql.internal.ScalablePushQueryMetrics) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) KsqlConfig(io.confluent.ksql.util.KsqlConfig) KafkaFuture(org.apache.kafka.common.KafkaFuture) ExecutableDdlStatement(io.confluent.ksql.parser.tree.ExecutableDdlStatement) Executors(java.util.concurrent.Executors) Objects(java.util.Objects) MetaStoreImpl(io.confluent.ksql.metastore.MetaStoreImpl) 
List(java.util.List) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) QueryPlannerOptions(io.confluent.ksql.planner.QueryPlannerOptions) KsqlExecutionContext(io.confluent.ksql.KsqlExecutionContext) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) StreamPullQueryMetadata(io.confluent.ksql.util.StreamPullQueryMetadata) Entry(java.util.Map.Entry) KsqlEngineMetrics(io.confluent.ksql.internal.KsqlEngineMetrics) KsqlException(io.confluent.ksql.util.KsqlException) Optional(java.util.Optional) Statement(io.confluent.ksql.parser.tree.Statement) Builder(com.google.common.collect.ImmutableList.Builder) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) PullQueryResult(io.confluent.ksql.physical.pull.PullQueryResult) HARouting(io.confluent.ksql.physical.pull.HARouting) StreamsConfig(org.apache.kafka.streams.StreamsConfig) PushRouting(io.confluent.ksql.physical.scalablepush.PushRouting) HashMap(java.util.HashMap) MetricCollectors(io.confluent.ksql.metrics.MetricCollectors) Function(java.util.function.Function) ImmutableList(com.google.common.collect.ImmutableList) Analysis(io.confluent.ksql.analyzer.Analysis) ConfiguredKsqlPlan(io.confluent.ksql.planner.plan.ConfiguredKsqlPlan) MetaStore(io.confluent.ksql.metastore.MetaStore) ParsedStatement(io.confluent.ksql.parser.KsqlParser.ParsedStatement) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Admin(org.apache.kafka.clients.admin.Admin) PushRoutingOptions(io.confluent.ksql.physical.scalablepush.PushRoutingOptions) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) QueryIdGenerator(io.confluent.ksql.query.id.QueryIdGenerator) QueryContainer(io.confluent.ksql.parser.tree.QueryContainer) Logger(org.slf4j.Logger) QueryAnalyzer(io.confluent.ksql.analyzer.QueryAnalyzer) ServiceInfo(io.confluent.ksql.ServiceInfo) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) 
ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) KsqlServerException(io.confluent.ksql.util.KsqlServerException) IsolationLevel(org.apache.kafka.common.IsolationLevel) StreamsErrorCollector(io.confluent.ksql.metrics.StreamsErrorCollector) Closeable(java.io.Closeable) VisibleForTesting(com.google.common.annotations.VisibleForTesting) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) Collections(java.util.Collections) PreparedStatement(io.confluent.ksql.parser.KsqlParser.PreparedStatement) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) KsqlServerException(io.confluent.ksql.util.KsqlServerException) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException)

Example 8 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Apache Kafka project.

From the class HighAvailabilityStreamsPartitionAssignorTest, method createMockAdminClient.

// If you don't care about setting the end offsets for each specific topic partition, the helper method
// getTopicPartitionOffsetMap is useful for building this input map for all partitions
private void createMockAdminClient(final Map<TopicPartition, Long> changelogEndOffsets) {
    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult listOffsetsResult = EasyMock.createNiceMock(ListOffsetsResult.class);

    // Build one stubbed ListOffsetsResultInfo per partition, each reporting the supplied end offset.
    final Map<TopicPartition, ListOffsetsResultInfo> offsetInfos = new HashMap<>();
    for (final Map.Entry<TopicPartition, Long> endOffset : changelogEndOffsets.entrySet()) {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(endOffset.getValue());
        EasyMock.replay(info);
        offsetInfos.put(endOffset.getKey(), info);
    }
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(offsetInfos);

    expect(adminClient.listOffsets(anyObject())).andStubReturn(listOffsetsResult);
    expect(listOffsetsResult.all()).andReturn(allFuture);
    EasyMock.replay(listOffsetsResult);
}
Also used : ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) Entry(java.util.Map.Entry) ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Example 9 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Apache Kafka project.

From the class StoreChangelogReader, method endOffsetForChangelogs.

private Map<TopicPartition, Long> endOffsetForChangelogs(final Map<TaskId, Task> tasks, final Set<TopicPartition> partitions) {
    // Nothing to look up; skip the admin round-trip entirely.
    if (partitions.isEmpty()) {
        return Collections.emptyMap();
    }
    try {
        // Ask for the latest offset of every changelog partition, using READ_UNCOMMITTED isolation.
        final Map<TopicPartition, OffsetSpec> offsetSpecs = partitions.stream()
            .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest()));
        final ListOffsetsResult offsetsResult =
            adminClient.listOffsets(offsetSpecs, new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
        final Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> infoPerPartition = offsetsResult.all().get();
        // The request succeeded, so any pending timeout for these tasks can be cleared.
        clearTaskTimeout(getTasksFromPartitions(tasks, partitions));
        final Map<TopicPartition, Long> endOffsets = new HashMap<>();
        infoPerPartition.forEach((partition, info) -> endOffsets.put(partition, info.offset()));
        return endOffsets;
    } catch (final TimeoutException | InterruptedException | ExecutionException retriableException) {
        // Treated as retriable: record (or escalate) the task timeout and try again next loop.
        log.debug("Could not fetch all end offsets for {}, will retry in the next run loop", partitions);
        maybeInitTaskTimeoutOrThrow(getTasksFromPartitions(tasks, partitions), retriableException);
        return Collections.emptyMap();
    } catch (final KafkaException e) {
        throw new StreamsException(String.format("Failed to retrieve end offsets for %s", partitions), e);
    }
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) TaskId(org.apache.kafka.streams.processor.TaskId) KafkaException(org.apache.kafka.common.KafkaException) HashMap(java.util.HashMap) StreamsException(org.apache.kafka.streams.errors.StreamsException) ClientUtils.fetchCommittedOffsets(org.apache.kafka.streams.processor.internals.ClientUtils.fetchCommittedOffsets) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Function(java.util.function.Function) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) Admin(org.apache.kafka.clients.admin.Admin) TaskCorruptedException(org.apache.kafka.streams.errors.TaskCorruptedException) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) StateStoreMetadata(org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Collection(java.util.Collection) Set(java.util.Set) StateRestoreListener(org.apache.kafka.streams.processor.StateRestoreListener) Collectors(java.util.stream.Collectors) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) ExecutionException(java.util.concurrent.ExecutionException) IsolationLevel(org.apache.kafka.common.IsolationLevel) List(java.util.List) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) InvalidOffsetException(org.apache.kafka.clients.consumer.InvalidOffsetException) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) Collections(java.util.Collections) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) StreamsException(org.apache.kafka.streams.errors.StreamsException) 
ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaException(org.apache.kafka.common.KafkaException) ExecutionException(java.util.concurrent.ExecutionException) HashMap(java.util.HashMap) Map(java.util.Map) TimeoutException(org.apache.kafka.common.errors.TimeoutException)

Example 10 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Apache Kafka project.

From the class StreamsPartitionAssignorTest, method shouldSkipListOffsetsRequestForNewlyCreatedChangelogTopics.

@Test
public void shouldSkipListOffsetsRequestForNewlyCreatedChangelogTopics() {
    // The admin client is stubbed to accept only an empty listOffsets request — per the test
    // name, offsets for the newly created changelog topic should never be queried.
    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult listOffsetsResult = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> emptyOffsetsFuture = new KafkaFutureImpl<>();
    emptyOffsetsFuture.complete(emptyMap());
    expect(adminClient.listOffsets(emptyMap())).andStubReturn(listOffsetsResult);
    expect(listOffsetsResult.all()).andReturn(emptyOffsetsFuture);

    // Topology with a state store, driving creation of a changelog topic.
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store1", false), "processor1");
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));

    EasyMock.replay(listOffsetsResult);
    configureDefault();
    overwriteInternalTopicManagerWithMock(true);
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
    EasyMock.verify(adminClient);
}
Also used : ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) Matchers.anEmptyMap(org.hamcrest.Matchers.anEmptyMap) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) AdminClient(org.apache.kafka.clients.admin.AdminClient) Test(org.junit.Test)

Aggregations

Map (java.util.Map)13 ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult)13 HashMap (java.util.HashMap)9 Admin (org.apache.kafka.clients.admin.Admin)8 TopicPartition (org.apache.kafka.common.TopicPartition)6 Test (org.junit.Test)6 Entry (java.util.Map.Entry)5 Set (java.util.Set)5 ExecutionException (java.util.concurrent.ExecutionException)5 OffsetSpec (org.apache.kafka.clients.admin.OffsetSpec)5 KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl)5 Logger (org.slf4j.Logger)5 Collections (java.util.Collections)4 Objects (java.util.Objects)4 Function (java.util.function.Function)4 AdminClient (org.apache.kafka.clients.admin.AdminClient)4 ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo)4 TopicDescription (org.apache.kafka.clients.admin.TopicDescription)4 LoggerFactory (org.slf4j.LoggerFactory)4 ImmutableList (com.google.common.collect.ImmutableList)3