Use of io.confluent.ksql.test.tools.Topic in project ksql by confluentinc.
The class RestTestExecutor, method waitForPersistentQueriesToProcessInputs.
/**
* This method looks at all of the source topics that feed into all of the subtopologies and waits
* for the consumer group associated with each application to reach the end offsets for those
* topics. This effectively ensures that nothing is lagging when it completes successfully.
* This should ensure that any materialized state has been built up correctly and is ready for
* pull queries.
*/
private void waitForPersistentQueriesToProcessInputs() {
  // First wait for the queries to be in the RUNNING state
  boolean allRunning = false;
  final long queryRunningThreshold =
      System.currentTimeMillis() + MAX_QUERY_RUNNING_CHECK.toMillis();
  while (System.currentTimeMillis() < queryRunningThreshold) {
    boolean notReady = false;
    for (final PersistentQueryMetadata persistentQueryMetadata : engine.getPersistentQueries()) {
      if (persistentQueryMetadata.getState() != State.RUNNING) {
        notReady = true;
      }
    }
    if (notReady) {
      threadYield();
    } else {
      allRunning = true;
      break;
    }
  }
  if (!allRunning) {
    throw new AssertionError("Timed out while trying to wait for queries to begin running");
  }

  // Collect all application ids
  final List<String> queryApplicationIds = engine.getPersistentQueries().stream()
      .map(QueryMetadata::getQueryApplicationId)
      .collect(Collectors.toList());

  // Collect all possible source topic names for each application id
  final Map<String, Set<String>> possibleTopicNamesByAppId = engine.getPersistentQueries().stream()
      .collect(Collectors.toMap(
          QueryMetadata::getQueryApplicationId,
          m -> {
            final Set<String> topics = getSourceTopics(m);
            final Set<String> possibleInternalNames = topics.stream()
                .map(t -> m.getQueryApplicationId() + "-" + t)
                .collect(Collectors.toSet());
            final Set<String> all = new HashSet<>();
            all.addAll(topics);
            all.addAll(possibleInternalNames);
            return all;
          }));
  final Set<String> possibleTopicNames = possibleTopicNamesByAppId.values().stream()
      .flatMap(Collection::stream)
      .collect(Collectors.toSet());

  // Every topic is either internal or not, so we expect to match exactly half of them.
  final int expectedTopics = possibleTopicNames.size() / 2;

  // Find the intersection of possible topic names and real topic names, and wait until the
  // expected number are all there
  final Set<String> topics = new HashSet<>();
  boolean foundTopics = false;
  final long topicThreshold = System.currentTimeMillis() + MAX_TOPIC_NAME_LOOKUP.toMillis();
  while (System.currentTimeMillis() < topicThreshold) {
    final Set<String> expectedNames = kafkaCluster.getTopics();
    expectedNames.retainAll(possibleTopicNames);
    if (expectedNames.size() == expectedTopics) {
      foundTopics = true;
      topics.addAll(expectedNames);
      break;
    }
  }
  if (!foundTopics) {
    throw new AssertionError("Timed out while trying to find topics");
  }

  // Only retain topic names which are known to exist.
  final Map<String, Set<String>> topicNamesByAppId = possibleTopicNamesByAppId.entrySet().stream()
      .collect(Collectors.toMap(
          Entry::getKey,
          e -> {
            e.getValue().retainAll(topics);
            return e.getValue();
          }));

  final Map<String, Integer> partitionCount = kafkaCluster.getPartitionCount(topics);
  final Map<String, List<TopicPartition>> topicPartitionsByAppId = queryApplicationIds.stream()
      .collect(Collectors.toMap(
          appId -> appId,
          appId -> {
            final List<TopicPartition> allTopicPartitions = new ArrayList<>();
            for (final String topic : topicNamesByAppId.get(appId)) {
              for (int i = 0; i < partitionCount.get(topic); i++) {
                final TopicPartition tp = new TopicPartition(topic, i);
                allTopicPartitions.add(tp);
              }
            }
            return allTopicPartitions;
          }));

  final long threshold = System.currentTimeMillis() + MAX_STATIC_WARM_UP.toMillis();
  mainloop:
  while (System.currentTimeMillis() < threshold) {
    for (final String queryApplicationId : queryApplicationIds) {
      final List<TopicPartition> topicPartitions = topicPartitionsByAppId.get(queryApplicationId);
      final Map<TopicPartition, Long> currentOffsets =
          kafkaCluster.getConsumerGroupOffset(queryApplicationId);
      final Map<TopicPartition, Long> endOffsets = kafkaCluster.getEndOffsets(topicPartitions,
          // Since we're doing At Least Once, we can do read uncommitted.
          IsolationLevel.READ_COMMITTED);

      for (final TopicPartition tp : topicPartitions) {
        if (!currentOffsets.containsKey(tp) && endOffsets.get(tp) > 0) {
          LOG.info("Haven't committed offsets yet for " + tp + " end offset " + endOffsets.get(tp));
          threadYield();
          continue mainloop;
        }
      }

      for (final Map.Entry<TopicPartition, Long> entry : currentOffsets.entrySet()) {
        final TopicPartition tp = entry.getKey();
        final long currentOffset = entry.getValue();
        final long endOffset = endOffsets.get(tp);
        if (currentOffset < endOffset) {
          LOG.info("Offsets are not caught up current: " + currentOffsets + " end: " + endOffsets);
          threadYield();
          continue mainloop;
        }
      }
    }
    LOG.info("Offsets are all up to date");
    return;
  }
  LOG.info("Timed out waiting for correct response");
  throw new AssertionError("Timed out while trying to wait for offsets");
}
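The helpers used above (kafkaCluster.getConsumerGroupOffset and kafkaCluster.getEndOffsets) belong to the test's embedded cluster. As a rough illustration of the same "consumer group has caught up to the end offsets" check expressed against the plain Kafka Admin API, here is a minimal, self-contained sketch; the class name, group id, topic, and bootstrap address are illustrative assumptions and not part of the ksql test code.

import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public final class ConsumerGroupLagCheck {

  // True when the consumer group has committed offsets at (or past) the latest end offset
  // for every supplied partition; partitions with no data and no commits count as caught up.
  public static boolean isCaughtUp(
      final Admin admin,
      final String groupId,
      final Collection<TopicPartition> partitions
  ) throws Exception {
    // Offsets the group has committed so far.
    final Map<TopicPartition, OffsetAndMetadata> committed =
        admin.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();

    // Latest (end) offsets of the same partitions.
    final Map<TopicPartition, OffsetSpec> request = new HashMap<>();
    partitions.forEach(tp -> request.put(tp, OffsetSpec.latest()));
    final Map<TopicPartition, ListOffsetsResultInfo> ends = admin.listOffsets(request).all().get();

    for (final TopicPartition tp : partitions) {
      final long end = ends.get(tp).offset();
      final OffsetAndMetadata commit = committed.get(tp);
      if (commit == null) {
        if (end > 0) {
          return false; // the partition has data but nothing has been committed yet
        }
      } else if (commit.offset() < end) {
        return false; // the group is still lagging on this partition
      }
    }
    return true;
  }

  public static void main(final String[] args) throws Exception {
    final Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
    try (Admin admin = Admin.create(props)) {
      final boolean caughtUp = isCaughtUp(
          admin, "my-query-app-id", List.of(new TopicPartition("input-topic", 0)));
      System.out.println("caught up: " + caughtUp);
    }
  }
}

A caller would typically poll isCaughtUp in a loop with a timeout, much like the warm-up loop above, rather than checking once.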
Use of io.confluent.ksql.test.tools.Topic in project ksql by confluentinc.
The class RestTestExecutor, method initializeTopics.
private void initializeTopics(final RestTestCase testCase) {
  final Collection<Topic> topics = TestCaseBuilderUtil.getAllTopics(
      testCase.getStatements(),
      testCase.getTopics(),
      testCase.getOutputRecords(),
      testCase.getInputRecords(),
      TestFunctionRegistry.INSTANCE.get(),
      new KsqlConfig(testCase.getProperties()));

  topics.forEach(topic -> {
    final Runnable createJob = () -> kafkaCluster.createTopic(
        topic.getName(),
        topic.getNumPartitions(),
        topic.getReplicas());

    // Test case could be trying to create a topic deleted by previous test.
    // Need to wait for previous topic to be deleted async, until then requests will fail
    RetryUtil.retryWithBackoff(12, 10, (int) TimeUnit.SECONDS.toMillis(10), createJob);

    topic.getKeySchema().ifPresent(schema -> {
      try {
        serviceContext.getSchemaRegistryClient()
            .register(KsqlConstants.getSRSubject(topic.getName(), true), schema);
      } catch (final Exception e) {
        throw new RuntimeException(e);
      }
    });

    topic.getValueSchema().ifPresent(schema -> {
      try {
        serviceContext.getSchemaRegistryClient()
            .register(KsqlConstants.getSRSubject(topic.getName(), false), schema);
      } catch (final Exception e) {
        throw new RuntimeException(e);
      }
    });
  });
}
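The register calls above publish the topic's key and value schemas under the subjects produced by KsqlConstants.getSRSubject, which follow the usual TopicNameStrategy naming (typically "<topic>-key" and "<topic>-value"). Below is a minimal sketch of an equivalent registration made directly through the Schema Registry client; the endpoint URL, class name, topic name, and Avro schema are illustrative assumptions.

import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public final class SchemaRegistrationSketch {

  public static void main(final String[] args) throws Exception {
    // Assumed local Schema Registry endpoint; the second argument caps the client's schema cache.
    final SchemaRegistryClient client =
        new CachedSchemaRegistryClient("http://localhost:8081", 100);

    final String topic = "users"; // illustrative topic name

    // A small Avro record schema used purely for demonstration.
    final AvroSchema valueSchema = new AvroSchema(
        "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"long\"},"
            + "{\"name\":\"name\",\"type\":\"string\"}]}");

    // Value schemas go under the "<topic>-value" subject; a key schema would use "<topic>-key".
    final int schemaId = client.register(topic + "-value", valueSchema);
    System.out.println("Registered value schema with id " + schemaId);
  }
}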