
Example 1 with Topic

Use of io.confluent.ksql.test.tools.Topic in the ksql project by confluentinc.

From the class RestTestExecutor, the method waitForPersistentQueriesToProcessInputs:

/**
 * Looks at all of the source topics that feed the subtopologies and waits for the consumer
 * group associated with each application to reach the end offsets for those topics. When it
 * completes successfully, nothing is lagging, which ensures that any materialized state has
 * been built up correctly and is ready for pull queries.
 */
private void waitForPersistentQueriesToProcessInputs() {
    // First wait for the queries to be in the RUNNING state
    boolean allRunning = false;
    final long queryRunningThreshold = System.currentTimeMillis() + MAX_QUERY_RUNNING_CHECK.toMillis();
    while (System.currentTimeMillis() < queryRunningThreshold) {
        boolean notReady = false;
        for (PersistentQueryMetadata persistentQueryMetadata : engine.getPersistentQueries()) {
            if (persistentQueryMetadata.getState() != State.RUNNING) {
                notReady = true;
            }
        }
        if (notReady) {
            threadYield();
        } else {
            allRunning = true;
            break;
        }
    }
    if (!allRunning) {
        throw new AssertionError("Timed out while trying to wait for queries to begin running");
    }
    // Collect all application ids
    List<String> queryApplicationIds = engine.getPersistentQueries().stream()
        .map(QueryMetadata::getQueryApplicationId)
        .collect(Collectors.toList());
    // Collect all possible source topic names for each application id
    Map<String, Set<String>> possibleTopicNamesByAppId = engine.getPersistentQueries().stream()
        .collect(Collectors.toMap(QueryMetadata::getQueryApplicationId, m -> {
            Set<String> topics = getSourceTopics(m);
            Set<String> possibleInternalNames = topics.stream()
                .map(t -> m.getQueryApplicationId() + "-" + t)
                .collect(Collectors.toSet());
            Set<String> all = new HashSet<>();
            all.addAll(topics);
            all.addAll(possibleInternalNames);
            return all;
        }));
    final Set<String> possibleTopicNames = possibleTopicNamesByAppId.values().stream()
        .flatMap(Collection::stream)
        .collect(Collectors.toSet());
    // Each source topic was added under two names (plain and appId-prefixed), and exactly one
    // of each pair exists in the cluster, so we expect to match exactly half of them.
    int expectedTopics = possibleTopicNames.size() / 2;
    // Find the intersection of possible topic names and real topic names, and wait until the
    // expected number are all there
    final Set<String> topics = new HashSet<>();
    boolean foundTopics = false;
    final long topicThreshold = System.currentTimeMillis() + MAX_TOPIC_NAME_LOOKUP.toMillis();
    while (System.currentTimeMillis() < topicThreshold) {
        Set<String> matchingNames = kafkaCluster.getTopics();
        matchingNames.retainAll(possibleTopicNames);
        if (matchingNames.size() == expectedTopics) {
            foundTopics = true;
            topics.addAll(matchingNames);
            break;
        }
        // Yield rather than spinning in a tight loop while topics are still being created.
        threadYield();
    }
    if (!foundTopics) {
        throw new AssertionError("Timed out while trying to find topics");
    }
    // Only retain topic names which are known to exist.
    Map<String, Set<String>> topicNamesByAppId = possibleTopicNamesByAppId.entrySet().stream()
        .collect(Collectors.toMap(Entry::getKey, e -> {
            e.getValue().retainAll(topics);
            return e.getValue();
        }));
    Map<String, Integer> partitionCount = kafkaCluster.getPartitionCount(topics);
    Map<String, List<TopicPartition>> topicPartitionsByAppId = queryApplicationIds.stream()
        .collect(Collectors.toMap(appId -> appId, appId -> {
        final List<TopicPartition> allTopicPartitions = new ArrayList<>();
        for (String topic : topicNamesByAppId.get(appId)) {
            for (int i = 0; i < partitionCount.get(topic); i++) {
                final TopicPartition tp = new TopicPartition(topic, i);
                allTopicPartitions.add(tp);
            }
        }
        return allTopicPartitions;
    }));
    final long threshold = System.currentTimeMillis() + MAX_STATIC_WARM_UP.toMillis();
    mainloop: while (System.currentTimeMillis() < threshold) {
        for (String queryApplicationId : queryApplicationIds) {
            final List<TopicPartition> topicPartitions = topicPartitionsByAppId.get(queryApplicationId);
            Map<TopicPartition, Long> currentOffsets = kafkaCluster.getConsumerGroupOffset(queryApplicationId);
            // The queries run with at-least-once semantics, so READ_UNCOMMITTED would suffice;
            // READ_COMMITTED is simply the stricter choice.
            Map<TopicPartition, Long> endOffsets =
                kafkaCluster.getEndOffsets(topicPartitions, IsolationLevel.READ_COMMITTED);
            for (final TopicPartition tp : topicPartitions) {
                if (!currentOffsets.containsKey(tp) && endOffsets.get(tp) > 0) {
                    LOG.info("Haven't committed offsets yet for " + tp + " end offset " + endOffsets.get(tp));
                    threadYield();
                    continue mainloop;
                }
            }
            for (final Map.Entry<TopicPartition, Long> entry : currentOffsets.entrySet()) {
                final TopicPartition tp = entry.getKey();
                final long currentOffset = entry.getValue();
                final long endOffset = endOffsets.get(tp);
                if (currentOffset < endOffset) {
                    LOG.info("Offsets are not caught up current: " + currentOffsets + " end: " + endOffsets);
                    threadYield();
                    continue mainloop;
                }
            }
        }
        LOG.info("Offsets are all up to date");
        return;
    }
    LOG.info("Timed out waiting for correct response");
    throw new AssertionError("Timed out while trying to wait for offsets");
}
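
For comparison, here is a minimal, self-contained sketch of the same "has the consumer group caught up to the end offsets?" check written against the plain Kafka Admin API instead of the test cluster helpers above. The class name, broker address, and group id are hypothetical placeholders; the Admin calls themselves (listConsumerGroupOffsets, listOffsets) are standard Kafka client API.

import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public final class ConsumerGroupLagCheck {

    // Returns true once every offset the group has committed has reached the current
    // end offset of its partition, i.e. the group is not lagging.
    static boolean isCaughtUp(final Admin admin, final String groupId) throws Exception {
        final Map<TopicPartition, OffsetAndMetadata> committed =
            admin.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();

        // Look up the latest (end) offset for each partition the group has committed to.
        final Map<TopicPartition, ListOffsetsResultInfo> ends = admin.listOffsets(
            committed.keySet().stream()
                .collect(Collectors.toMap(tp -> tp, tp -> OffsetSpec.latest()))).all().get();

        return committed.entrySet().stream().allMatch(
            e -> e.getValue().offset() >= ends.get(e.getKey()).offset());
    }

    public static void main(final String[] args) throws Exception {
        final Properties props = new Properties();
        // Hypothetical broker address; point this at your own cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = AdminClient.create(props)) {
            // Hypothetical application id of a persistent query.
            System.out.println("caught up: " + isCaughtUp(admin, "some-query-app-id"));
        }
    }
}

Like the method above, a real test would poll this check in a loop against a deadline, and would also treat a partition that has data but no committed offset yet as lagging; this sketch considers committed partitions only.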

Example 2 with Topic

Use of io.confluent.ksql.test.tools.Topic in the ksql project by confluentinc.

From the class RestTestExecutor, the method initializeTopics:

private void initializeTopics(final RestTestCase testCase) {
    final Collection<Topic> topics = TestCaseBuilderUtil.getAllTopics(
        testCase.getStatements(),
        testCase.getTopics(),
        testCase.getOutputRecords(),
        testCase.getInputRecords(),
        TestFunctionRegistry.INSTANCE.get(),
        new KsqlConfig(testCase.getProperties()));
    topics.forEach(topic -> {
        final Runnable createJob = () -> kafkaCluster.createTopic(
            topic.getName(), topic.getNumPartitions(), topic.getReplicas());
        // The test case may try to create a topic that a previous test deleted. Topic deletion
        // is asynchronous, so retry until the old topic is fully gone and creation succeeds.
        RetryUtil.retryWithBackoff(12, 10, (int) TimeUnit.SECONDS.toMillis(10), createJob);
        topic.getKeySchema().ifPresent(schema -> {
            try {
                serviceContext.getSchemaRegistryClient().register(KsqlConstants.getSRSubject(topic.getName(), true), schema);
            } catch (final Exception e) {
                throw new RuntimeException(e);
            }
        });
        topic.getValueSchema().ifPresent(schema -> {
            try {
                serviceContext.getSchemaRegistryClient().register(KsqlConstants.getSRSubject(topic.getName(), false), schema);
            } catch (final Exception e) {
                throw new RuntimeException(e);
            }
        });
    });
}
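
The retryWithBackoff call above exists because topic deletion in Kafka is asynchronous: a create request racing a pending delete fails until the delete completes. As a rough illustration of the pattern, here is a minimal, generic retry-with-backoff helper. It is a hypothetical stand-in, not ksql's actual RetryUtil; its parameters mirror the call site above (retry count, initial delay in ms, delay cap in ms).

public final class RetryWithBackoff {

    // Runs the job, retrying on RuntimeException with an exponentially growing,
    // capped delay between attempts. Rethrows the last failure when retries run out.
    static void retry(
        final int maxRetries,
        final long initialDelayMs,
        final long maxDelayMs,
        final Runnable job
    ) {
        long delayMs = initialDelayMs;
        RuntimeException last = null;
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                job.run();
                return;
            } catch (final RuntimeException e) {
                last = e;
                if (attempt == maxRetries) {
                    break; // out of retries; rethrow below
                }
                try {
                    Thread.sleep(delayMs);
                } catch (final InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException("interrupted while backing off", ie);
                }
                delayMs = Math.min(delayMs * 2, maxDelayMs); // exponential backoff, capped
            }
        }
        throw last;
    }

    public static void main(final String[] args) {
        // Mirrors the call above: up to 12 retries, starting at 10 ms, capped at 10 s.
        retry(12, 10, 10_000, () -> System.out.println("create topic (may fail transiently)"));
    }
}

Doubling the delay keeps early retries cheap while still bounding the total wait for slow deletions.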
