Search in sources :

Example 11 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Apache Kafka project.

From the class StreamsPartitionAssignorTest: the method shouldRequestEndOffsetsForPreexistingChangelogs.

@Test
public void shouldRequestEndOffsetsForPreexistingChangelogs() {
    final Set<TopicPartition> changelogs = mkSet(new TopicPartition(APPLICATION_ID + "-store-changelog", 0), new TopicPartition(APPLICATION_ID + "-store-changelog", 1), new TopicPartition(APPLICATION_ID + "-store-changelog", 2));
    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(Long.MAX_VALUE);
        EasyMock.replay(info);
        return info;
    })));
    final Capture<Map<TopicPartition, OffsetSpec>> capturedChangelogs = EasyMock.newCapture();
    expect(adminClient.listOffsets(EasyMock.capture(capturedChangelogs))).andReturn(result).once();
    expect(result.all()).andReturn(allFuture);
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addProcessor("processor1", new MockApiProcessorSupplier<>(), "source1");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor1");
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    EasyMock.replay(result);
    configureDefault();
    overwriteInternalTopicManagerWithMock(false);
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
    EasyMock.verify(adminClient);
    assertThat(capturedChangelogs.getValue().keySet(), equalTo(changelogs));
}
Also used : ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TopicPartition(org.apache.kafka.common.TopicPartition) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) GroupSubscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.GroupSubscription) Subscription(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) Matchers.anEmptyMap(org.hamcrest.Matchers.anEmptyMap) MockKeyValueStoreBuilder(org.apache.kafka.test.MockKeyValueStoreBuilder) AdminClient(org.apache.kafka.clients.admin.AdminClient) Test(org.junit.Test)

Example 12 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Confluent ksqlDB project.

From the class KafkaEmbedded: the method getEndOffsets.

/**
 * The end offsets for a given collection of TopicPartitions
 * @param topicPartitions The collection of TopicPartitions to get end offsets for
 * @param isolationLevel The isolation level to use when reading end offsets.
 * @return The map of TopicPartition to end offset
 */
public Map<TopicPartition, Long> getEndOffsets(final Collection<TopicPartition> topicPartitions, final IsolationLevel isolationLevel) {
    final Map<TopicPartition, OffsetSpec> topicPartitionsWithSpec = topicPartitions.stream().collect(Collectors.toMap(tp -> tp, tp -> OffsetSpec.latest()));
    try (AdminClient adminClient = adminClient()) {
        final ListOffsetsResult listOffsetsResult = adminClient.listOffsets(topicPartitionsWithSpec, new ListOffsetsOptions(isolationLevel));
        final Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> partitionResultMap = listOffsetsResult.all().get();
        return partitionResultMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
    } catch (final Exception e) {
        throw new RuntimeException(e);
    }
}
Also used : Arrays(java.util.Arrays) AssertEventually.assertThatEventually(io.confluent.ksql.test.util.AssertEventually.assertThatEventually) LoggerFactory(org.slf4j.LoggerFactory) ListConsumerGroupOffsetsResult(org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult) Supplier(java.util.function.Supplier) SecurityProtocol(org.apache.kafka.common.security.auth.SecurityProtocol) HashSet(java.util.HashSet) AdminClient(org.apache.kafka.clients.admin.AdminClient) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) DescribeTopicsResult(org.apache.kafka.clients.admin.DescribeTopicsResult) ImmutableList(com.google.common.collect.ImmutableList) Map(java.util.Map) Admin(org.apache.kafka.clients.admin.Admin) KafkaServer(kafka.server.KafkaServer) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) KafkaConfig(kafka.server.KafkaConfig) TopicPartition(org.apache.kafka.common.TopicPartition) Matchers.empty(org.hamcrest.Matchers.empty) ImmutableSet(com.google.common.collect.ImmutableSet) Properties(java.util.Properties) Logger(org.slf4j.Logger) ImmutableMap(com.google.common.collect.ImmutableMap) Files(java.nio.file.Files) SystemTime(org.apache.kafka.common.utils.SystemTime) Iterator(scala.collection.Iterator) Collection(java.util.Collection) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) NewTopic(org.apache.kafka.clients.admin.NewTopic) Set(java.util.Set) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) EndPoint(kafka.cluster.EndPoint) IsolationLevel(org.apache.kafka.common.IsolationLevel) Paths(java.nio.file.Paths) Entry(java.util.Map.Entry) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) TestUtils(kafka.utils.TestUtils) Matchers.is(org.hamcrest.Matchers.is) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) 
TopicPartition(org.apache.kafka.common.TopicPartition) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) IOException(java.io.IOException) AdminClient(org.apache.kafka.clients.admin.AdminClient)

Example 13 with ListOffsetsResult

Use of org.apache.kafka.clients.admin.ListOffsetsResult in the Confluent ksqlDB project.

From the class KsqlEngine: the method getStartOffsetsForStreamPullQuery.

private ImmutableMap<TopicPartition, Long> getStartOffsetsForStreamPullQuery(final Admin admin, final TopicDescription topicDescription) {
    final Map<TopicPartition, OffsetSpec> topicPartitions = topicDescription.partitions().stream().map(td -> new TopicPartition(topicDescription.name(), td.partition())).collect(toMap(identity(), tp -> OffsetSpec.earliest()));
    final ListOffsetsResult listOffsetsResult = admin.listOffsets(topicPartitions, // so we should do the same when checking end offsets.
    new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
    try {
        final Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> partitionResultMap = listOffsetsResult.all().get(10, TimeUnit.SECONDS);
        final Map<TopicPartition, Long> result = partitionResultMap.entrySet().stream().collect(toMap(Entry::getKey, e -> e.getValue().offset()));
        return ImmutableMap.copyOf(result);
    } catch (final InterruptedException e) {
        log.error("Admin#listOffsets(" + topicDescription.name() + ") interrupted", e);
        throw new KsqlServerException("Interrupted");
    } catch (final ExecutionException e) {
        log.error("Error executing Admin#listOffsets(" + topicDescription.name() + ")", e);
        throw new KsqlServerException("Internal Server Error");
    } catch (final TimeoutException e) {
        log.error("Admin#listOffsets(" + topicDescription.name() + ") timed out", e);
        throw new KsqlServerException("Backend timed out");
    }
}
Also used : Query(io.confluent.ksql.parser.tree.Query) UnaryOperator.identity(java.util.function.UnaryOperator.identity) SourceName(io.confluent.ksql.name.SourceName) ServiceContext(io.confluent.ksql.services.ServiceContext) LoggerFactory(org.slf4j.LoggerFactory) RoutingOptions(io.confluent.ksql.execution.streams.RoutingOptions) TimeoutException(java.util.concurrent.TimeoutException) ProcessingLogContext(io.confluent.ksql.logging.processing.ProcessingLogContext) MutableMetaStore(io.confluent.ksql.metastore.MutableMetaStore) Context(io.vertx.core.Context) TransientQueryMetadata(io.confluent.ksql.util.TransientQueryMetadata) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TransientQueryCleanupListener(io.confluent.ksql.internal.TransientQueryCleanupListener) RewrittenAnalysis(io.confluent.ksql.analyzer.RewrittenAnalysis) Collectors.toMap(java.util.stream.Collectors.toMap) Map(java.util.Map) QueryLogger(io.confluent.ksql.logging.query.QueryLogger) QueryId(io.confluent.ksql.query.QueryId) PersistentQueryMetadata(io.confluent.ksql.util.PersistentQueryMetadata) QueryMetadata(io.confluent.ksql.util.QueryMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) ImmutableAnalysis(io.confluent.ksql.analyzer.ImmutableAnalysis) ImmutableMap(com.google.common.collect.ImmutableMap) FunctionRegistry(io.confluent.ksql.function.FunctionRegistry) ScalablePushQueryMetadata(io.confluent.ksql.util.ScalablePushQueryMetadata) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ScalablePushQueryMetrics(io.confluent.ksql.internal.ScalablePushQueryMetrics) ConfiguredStatement(io.confluent.ksql.statement.ConfiguredStatement) KsqlConfig(io.confluent.ksql.util.KsqlConfig) KafkaFuture(org.apache.kafka.common.KafkaFuture) ExecutableDdlStatement(io.confluent.ksql.parser.tree.ExecutableDdlStatement) Executors(java.util.concurrent.Executors) Objects(java.util.Objects) MetaStoreImpl(io.confluent.ksql.metastore.MetaStoreImpl) 
List(java.util.List) PullQueryExecutorMetrics(io.confluent.ksql.internal.PullQueryExecutorMetrics) QueryPlannerOptions(io.confluent.ksql.planner.QueryPlannerOptions) KsqlExecutionContext(io.confluent.ksql.KsqlExecutionContext) ConsistencyOffsetVector(io.confluent.ksql.util.ConsistencyOffsetVector) StreamPullQueryMetadata(io.confluent.ksql.util.StreamPullQueryMetadata) Entry(java.util.Map.Entry) KsqlEngineMetrics(io.confluent.ksql.internal.KsqlEngineMetrics) KsqlException(io.confluent.ksql.util.KsqlException) Optional(java.util.Optional) Statement(io.confluent.ksql.parser.tree.Statement) Builder(com.google.common.collect.ImmutableList.Builder) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) PullQueryResult(io.confluent.ksql.physical.pull.PullQueryResult) HARouting(io.confluent.ksql.physical.pull.HARouting) StreamsConfig(org.apache.kafka.streams.StreamsConfig) PushRouting(io.confluent.ksql.physical.scalablepush.PushRouting) HashMap(java.util.HashMap) MetricCollectors(io.confluent.ksql.metrics.MetricCollectors) Function(java.util.function.Function) ImmutableList(com.google.common.collect.ImmutableList) Analysis(io.confluent.ksql.analyzer.Analysis) ConfiguredKsqlPlan(io.confluent.ksql.planner.plan.ConfiguredKsqlPlan) MetaStore(io.confluent.ksql.metastore.MetaStore) ParsedStatement(io.confluent.ksql.parser.KsqlParser.ParsedStatement) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Admin(org.apache.kafka.clients.admin.Admin) PushRoutingOptions(io.confluent.ksql.physical.scalablepush.PushRoutingOptions) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) QueryIdGenerator(io.confluent.ksql.query.id.QueryIdGenerator) QueryContainer(io.confluent.ksql.parser.tree.QueryContainer) Logger(org.slf4j.Logger) QueryAnalyzer(io.confluent.ksql.analyzer.QueryAnalyzer) ServiceInfo(io.confluent.ksql.ServiceInfo) KsqlStatementException(io.confluent.ksql.util.KsqlStatementException) 
ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) KsqlServerException(io.confluent.ksql.util.KsqlServerException) IsolationLevel(org.apache.kafka.common.IsolationLevel) StreamsErrorCollector(io.confluent.ksql.metrics.StreamsErrorCollector) Closeable(java.io.Closeable) VisibleForTesting(com.google.common.annotations.VisibleForTesting) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) Collections(java.util.Collections) PreparedStatement(io.confluent.ksql.parser.KsqlParser.PreparedStatement) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) KsqlServerException(io.confluent.ksql.util.KsqlServerException) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException)

Aggregations

Map (java.util.Map)13 ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult)13 HashMap (java.util.HashMap)9 Admin (org.apache.kafka.clients.admin.Admin)8 TopicPartition (org.apache.kafka.common.TopicPartition)6 Test (org.junit.Test)6 Entry (java.util.Map.Entry)5 Set (java.util.Set)5 ExecutionException (java.util.concurrent.ExecutionException)5 OffsetSpec (org.apache.kafka.clients.admin.OffsetSpec)5 KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl)5 Logger (org.slf4j.Logger)5 Collections (java.util.Collections)4 Objects (java.util.Objects)4 Function (java.util.function.Function)4 AdminClient (org.apache.kafka.clients.admin.AdminClient)4 ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo)4 TopicDescription (org.apache.kafka.clients.admin.TopicDescription)4 LoggerFactory (org.slf4j.LoggerFactory)4 ImmutableList (com.google.common.collect.ImmutableList)3