Usage of io.confluent.ksql.services.KafkaTopicClient in the ksql project by confluentinc: class ListTopicsExecutor, method execute.
/**
 * Executes a {@code LIST TOPICS [EXTENDED]} statement against the Kafka cluster
 * reachable through the given service context.
 *
 * @param statement the configured LIST TOPICS statement
 * @param sessionProperties per-session properties (unused here)
 * @param executionContext the ksql execution context (unused here)
 * @param serviceContext supplies the Kafka topic and admin clients
 * @return a handled response carrying either a plain or an extended topic listing
 */
public static StatementExecutorResponse execute(final ConfiguredStatement<ListTopics> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext) {
  final KafkaTopicClient topicClient = serviceContext.getTopicClient();
  final Map<String, TopicDescription> descriptions = listTopics(topicClient, statement);

  if (!statement.getStatement().getShowExtended()) {
    // Plain listing: topic name and partition information only.
    final List<KafkaTopicInfo> infos = descriptions.values().stream()
        .map(ListTopicsExecutor::topicDescriptionToTopicInfo)
        .collect(Collectors.toList());
    final KafkaTopicsList listing = new KafkaTopicsList(statement.getStatementText(), infos);
    return StatementExecutorResponse.handled(Optional.of(listing));
  }

  // Extended listing: additionally report consumer/consumer-group counts per topic.
  final KafkaConsumerGroupClient groupClient =
      new KafkaConsumerGroupClientImpl(serviceContext::getAdminClient);
  final Map<String, List<Integer>> consumerCounts =
      getTopicConsumerAndGroupCounts(groupClient);

  final List<KafkaTopicInfoExtended> extendedInfos = descriptions.values().stream()
      .map(description -> topicDescriptionToTopicInfoExtended(description, consumerCounts))
      .collect(Collectors.toList());
  final KafkaTopicsListExtended listing =
      new KafkaTopicsListExtended(statement.getStatementText(), extendedInfos);
  return StatementExecutorResponse.handled(Optional.of(listing));
}
Usage of io.confluent.ksql.services.KafkaTopicClient in the ksql project by confluentinc: class OrphanedTransientQueryCleaner, method cleanupOrphanedInternalTopics.
/**
* Cleans up any internal topics that may exist for the given set of query application
* ids, since it's assumed that they are completed.
* @param serviceContext The service context
* @param queryApplicationIds The set of completed query application ids
*/
/**
 * Cleans up any internal topics that may exist for the given set of query application
 * ids, since it's assumed that they are completed.
 *
 * <p>Topic names that start with one of the given application ids are treated as
 * belonging to an orphaned transient query, and a cleanup task is scheduled for each
 * matching application id. If the topic listing fails, the error is logged and the
 * method returns without scheduling anything (best-effort).
 *
 * @param serviceContext The service context
 * @param queryApplicationIds The set of completed query application ids
 */
public void cleanupOrphanedInternalTopics(final ServiceContext serviceContext, final Set<String> queryApplicationIds) {
  final KafkaTopicClient topicClient = serviceContext.getTopicClient();
  final Set<String> existingTopics;
  try {
    existingTopics = topicClient.listTopicNames();
  } catch (KafkaResponseGetFailedException e) {
    LOG.error("Couldn't fetch topic names", e);
    return;
  }

  // An application id is "orphaned" if at least one existing topic name is prefixed by it;
  // limit(1) keeps only the first matching id per topic, mirroring a findFirst lookup.
  final Set<String> orphanedAppIds = existingTopics.stream()
      .flatMap(topicName -> queryApplicationIds.stream()
          .filter(topicName::startsWith)
          .limit(1))
      .collect(Collectors.toSet());

  for (final String appId : orphanedAppIds) {
    // Resolve the streams state directory, falling back to the Kafka Streams default.
    final String stateDir = ksqlConfig.getKsqlStreamConfigProps()
        .getOrDefault(
            StreamsConfig.STATE_DIR_CONFIG,
            StreamsConfig.configDef().defaultValues().get(StreamsConfig.STATE_DIR_CONFIG))
        .toString();
    cleanupService.addCleanupTask(new QueryCleanupService.QueryCleanupTask(
        serviceContext,
        appId,
        Optional.empty(),
        true,
        stateDir,
        ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
        ksqlConfig.getString(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG)));
  }
}
Usage of io.confluent.ksql.services.KafkaTopicClient in the ksql project by confluentinc: class KsqlContextTestUtil, method create.
/**
 * Builds a {@link KsqlContext} wired up with test-friendly collaborators: a default
 * Kafka client supplier, a real admin/topic client, the supplied schema registry
 * client, and a fresh {@link KsqlEngine} with a unique metrics prefix per call.
 *
 * @param ksqlConfig the ksql configuration used for admin client and engine setup
 * @param schemaRegistryClient schema registry client to expose through the service context
 * @param functionRegistry function registry for the engine
 * @return a new KsqlContext backed by the constructed service context and engine
 */
public static KsqlContext create(final KsqlConfig ksqlConfig, final SchemaRegistryClient schemaRegistryClient, final FunctionRegistry functionRegistry) {
  final KafkaClientSupplier kafkaClientSupplier = new DefaultKafkaClientSupplier();
  final Admin admin = kafkaClientSupplier.getAdmin(ksqlConfig.getKsqlAdminClientConfigProps());
  final KafkaTopicClient topicClient = new KafkaTopicClientImpl(() -> admin);

  final ConnectClient connectClient =
      new DefaultConnectClientFactory(ksqlConfig).get(Optional.empty(), Collections.emptyList(), Optional.empty());
  final ServiceContext serviceContext = TestServiceContext.create(
      kafkaClientSupplier,
      admin,
      topicClient,
      () -> schemaRegistryClient,
      connectClient);

  // Unique prefix so repeated test instances don't collide on metric names.
  final String metricsPrefix = "instance-" + COUNTER.getAndIncrement() + "-";
  final KsqlEngine ksqlEngine = new KsqlEngine(
      serviceContext,
      ProcessingLogContext.create(),
      functionRegistry,
      ServiceInfo.create(ksqlConfig, metricsPrefix),
      new SequentialQueryIdGenerator(),
      ksqlConfig,
      Collections.emptyList(),
      new MetricCollectors());

  return new KsqlContext(serviceContext, ksqlConfig, ksqlEngine, Injectors.DEFAULT);
}
Usage of io.confluent.ksql.services.KafkaTopicClient in the ksql project by confluentinc: class IntegrationTestHarness, method ensureTopics.
/**
* Ensure topics with the given {@code topicNames} exist.
*
* <p>Topics will be created, if they do not already exist, with the specified
* {@code partitionCount}.
*
* @param topicNames the names of the topics to create.
*/
/**
 * Ensure topics with the given {@code topicNames} exist.
 *
 * <p>Topics will be created, if they do not already exist, with the specified
 * {@code partitionCount} and the default replication factor.
 *
 * @param partitionCount the partition count for any topic that has to be created.
 * @param topicNames the names of the topics to create.
 */
public void ensureTopics(final int partitionCount, final String... topicNames) {
  final KafkaTopicClient topicClient = serviceContext.get().getTopicClient();
  for (final String topicName : topicNames) {
    if (!topicClient.isTopicExists(topicName)) {
      topicClient.createTopic(topicName, partitionCount, DEFAULT_REPLICATION_FACTOR);
    }
  }
}
Usage of io.confluent.ksql.services.KafkaTopicClient in the ksql project by confluentinc: class KsqlRestoreCommandTopic, method maybeCleanUpQuery.
/**
 * If the given serialized command contains a query plan, deletes the query's internal
 * Kafka topics and cleans its local state store.
 *
 * <p>The command is parsed as JSON; a query is detected by the presence of
 * {@code plan.queryPlan}. Shared-runtime queries use their {@code runtimeId} as the
 * application id, while dedicated-runtime queries derive it from the query id.
 * Cleanup failures are reported to stdout but never propagated (best-effort).
 *
 * @param command the serialized command, UTF-8 JSON bytes
 * @param ksqlConfig config used to build admin clients and resolve topic/state naming
 */
private static void maybeCleanUpQuery(final byte[] command, final KsqlConfig ksqlConfig) {
  boolean queryIdFound = false;
  final Map<String, Object> streamsProperties = new HashMap<>(ksqlConfig.getKsqlStreamConfigProps());
  boolean sharedRuntimeQuery = false;
  String queryId = "";
  final JSONObject jsonObject = new JSONObject(new String(command, StandardCharsets.UTF_8));
  if (hasKey(jsonObject, "plan")) {
    final JSONObject plan = jsonObject.getJSONObject("plan");
    if (hasKey(plan, "queryPlan")) {
      final JSONObject queryPlan = plan.getJSONObject("queryPlan");
      queryId = queryPlan.getString("queryId");
      // NOTE(review): "runtimeId" is assumed to deserialize as Optional<String> here —
      // the unchecked casts below rely on that; confirm against the plan serializer.
      if (hasKey(queryPlan, "runtimeId") && ((Optional<String>) queryPlan.get("runtimeId")).isPresent()) {
        // Shared runtime: the runtime id IS the streams application id.
        streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, ((Optional<String>) queryPlan.get("runtimeId")).get());
        sharedRuntimeQuery = true;
      } else {
        // Dedicated runtime: derive the application id from the query id.
        streamsProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, QueryApplicationId.build(ksqlConfig, true, new QueryId(queryId)));
      }
      queryIdFound = true;
    }
  }

  // The command contains a query: clean up its internal state store and internal topics.
  if (queryIdFound) {
    final StreamsConfig streamsConfig = new StreamsConfig(streamsProperties);
    final String topicPrefix = sharedRuntimeQuery
        ? streamsConfig.getString(StreamsConfig.APPLICATION_ID_CONFIG)
        : QueryApplicationId.buildInternalTopicPrefix(ksqlConfig, sharedRuntimeQuery) + queryId;
    // try-with-resources: the Admin client was previously leaked (never closed).
    try (Admin admin = new DefaultKafkaClientSupplier().getAdmin(ksqlConfig.getKsqlAdminClientConfigProps())) {
      final KafkaTopicClient topicClient = new KafkaTopicClientImpl(() -> admin);
      topicClient.deleteInternalTopics(topicPrefix);
      new StateDirectory(streamsConfig, Time.SYSTEM, true, ksqlConfig.getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED)).clean();
      System.out.println(String.format("Cleaned up internal state store and internal topics for query %s", topicPrefix));
    } catch (final Exception e) {
      // Best-effort cleanup, but surface the cause instead of swallowing it silently.
      System.out.println(String.format("Failed to clean up query %s: %s", topicPrefix, e.getMessage()));
      e.printStackTrace(System.out);
    }
  }
}
Aggregations