use of io.confluent.ksql.services.KafkaTopicClient in project ksql by confluentinc.
the class ListSourceExecutorTest method shouldShowTablesExtended.
@Test
public void shouldShowTablesExtended() {
  // Given:
  final KsqlTable<?> table1 = engine.givenSource(DataSourceType.KTABLE, "table1");
  final KsqlTable<?> table2 = engine.givenSource(
      DataSourceType.KTABLE, "table2", ImmutableSet.of(SourceName.of("table1")));
  engine.givenSource(DataSourceType.KSTREAM, "stream");

  // When:
  final SourceDescriptionList descriptionList = (SourceDescriptionList) CUSTOM_EXECUTORS
      .listTables()
      .execute(
          (ConfiguredStatement<ListTables>) engine.configure("LIST TABLES EXTENDED;"),
          SESSION_PROPERTIES,
          engine.getEngine(),
          engine.getServiceContext())
      .getEntity()
      .orElseThrow(IllegalStateException::new);

  // Then:
  final KafkaTopicClient client = engine.getServiceContext().getTopicClient();
  assertThat(descriptionList.getSourceDescriptions(), containsInAnyOrder(
      SourceDescriptionFactory.create(
          table1,
          true,
          ImmutableList.of(),
          ImmutableList.of(),
          Optional.of(client.describeTopic(table1.getKafkaTopicName())),
          ImmutableList.of(),
          ImmutableList.of("table2"),
          new MetricCollectors()),
      SourceDescriptionFactory.create(
          table2,
          true,
          ImmutableList.of(),
          ImmutableList.of(),
          Optional.of(client.describeTopic(table1.getKafkaTopicName())),
          ImmutableList.of(),
          ImmutableList.of(),
          new MetricCollectors())));
}
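The extended listing attaches broker-side topic metadata to each source via KafkaTopicClient.describeTopic. The following is a minimal, hypothetical sketch of that lookup pattern, guarding against a failed describe call; it is not the ksql implementation, and the class and method names are illustrative.

import io.confluent.ksql.services.KafkaTopicClient;
import java.util.Optional;
import org.apache.kafka.clients.admin.TopicDescription;

final class TopicMetadataLookup {

  private final KafkaTopicClient topicClient;

  TopicMetadataLookup(final KafkaTopicClient topicClient) {
    this.topicClient = topicClient;
  }

  // Fetches partition/replica details for a source's backing topic, or empty if the
  // describe call fails; a listing can still be served without the extended details.
  Optional<TopicDescription> describe(final String kafkaTopicName) {
    try {
      return Optional.of(topicClient.describeTopic(kafkaTopicName));
    } catch (final RuntimeException e) {
      return Optional.empty();
    }
  }
}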
use of io.confluent.ksql.services.KafkaTopicClient in project ksql by confluentinc.
the class TestExecutorUtil method initializeTopics.
private static void initializeTopics(
    final TestCase testCase,
    final ServiceContext serviceContext,
    final StubKafkaService stubKafkaService,
    final FunctionRegistry functionRegistry,
    final KsqlConfig ksqlConfig) {
  final KafkaTopicClient topicClient = serviceContext.getTopicClient();
  final SchemaRegistryClient srClient = serviceContext.getSchemaRegistryClient();

  // Historic plans already have their topics captured; non-historic plans need to
  // capture topics from the statements.
  final List<String> statements = testCase.getExpectedTopology().isPresent()
      ? ImmutableList.of()
      : testCase.statements();

  final Collection<Topic> topics = TestCaseBuilderUtil.getAllTopics(
      statements,
      testCase.getTopics(),
      testCase.getOutputRecords(),
      testCase.getInputRecords(),
      functionRegistry,
      testCase.applyProperties(ksqlConfig));

  for (final Topic topic : topics) {
    stubKafkaService.ensureTopic(topic);
    topicClient.createTopic(topic.getName(), topic.getNumPartitions(), topic.getReplicas());

    topic.getKeySchema().ifPresent(schema -> {
      try {
        srClient.register(KsqlConstants.getSRSubject(topic.getName(), true), schema);
      } catch (final Exception e) {
        throw new RuntimeException(e);
      }
    });

    topic.getValueSchema().ifPresent(schema -> {
      try {
        srClient.register(KsqlConstants.getSRSubject(topic.getName(), false), schema);
      } catch (final Exception e) {
        throw new RuntimeException(e);
      }
    });
  }
}
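For reference, the two register calls above land on the per-topic key and value subjects in Schema Registry. A small hypothetical sketch of what that subject naming amounts to, assuming the standard TopicNameStrategy format ("<topic>-key" / "<topic>-value") rather than quoting KsqlConstants itself:

final class SrSubjects {
  // Hypothetical equivalent of KsqlConstants.getSRSubject under the standard
  // TopicNameStrategy; verify against KsqlConstants before relying on it.
  static String subjectFor(final String topicName, final boolean isKey) {
    return topicName + (isKey ? "-key" : "-value");
  }
}

For example, SrSubjects.subjectFor("users", false) yields "users-value", the subject the value schema would be registered under before the test statements run.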
use of io.confluent.ksql.services.KafkaTopicClient in project ksql by confluentinc.
the class IntegrationTestHarness method deleteInternalTopics.
/**
* Deletes internal topics for the given application.
*/
public void deleteInternalTopics(String applicationId) {
  final KafkaTopicClient topicClient = serviceContext.get().getTopicClient();
  topicClient.deleteInternalTopics(applicationId);
}
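A typical call site, as a hedged sketch: after terminating a persistent query in an integration test, pass its Kafka Streams application id so the repartition and changelog topics prefixed with that id are removed. TEST_HARNESS is assumed to be an IntegrationTestHarness instance, and the application id shown is purely illustrative.

// Clean up the query's internal topics so subsequent tests start from a clean cluster.
// "_confluent-ksql-default_query_CSAS_OUTPUT_0" is an example application id only.
TEST_HARNESS.deleteInternalTopics("_confluent-ksql-default_query_CSAS_OUTPUT_0");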
use of io.confluent.ksql.services.KafkaTopicClient in project ksql by confluentinc.
the class ListSourceExecutorTest method shouldNotCallTopicClientForExtendedDescription.
@Test
public void shouldNotCallTopicClientForExtendedDescription() {
  // Given:
  engine.givenSource(DataSourceType.KSTREAM, "stream1");
  final KafkaTopicClient spyTopicClient = spy(engine.getServiceContext().getTopicClient());
  final ServiceContext serviceContext = TestServiceContext.create(
      engine.getServiceContext().getKafkaClientSupplier(),
      engine.getServiceContext().getAdminClient(),
      spyTopicClient,
      engine.getServiceContext().getSchemaRegistryClientFactory(),
      engine.getServiceContext().getConnectClient());

  // When:
  CUSTOM_EXECUTORS.listStreams()
      .execute(
          (ConfiguredStatement<ListStreams>) engine.configure("SHOW STREAMS;"),
          SESSION_PROPERTIES,
          engine.getEngine(),
          serviceContext)
      .getEntity()
      .orElseThrow(IllegalStateException::new);

  // Then:
  verify(spyTopicClient, never()).describeTopic(anyString());
}
use of io.confluent.ksql.services.KafkaTopicClient in project ksql by confluentinc.
the class KsqlRestApplication method buildApplication.
@SuppressWarnings({ "checkstyle:JavaNCSS", "checkstyle:MethodLength", "checkstyle:ParameterNumber" })
static KsqlRestApplication buildApplication(
    final String metricsPrefix,
    final KsqlRestConfig restConfig,
    final Function<Supplier<Boolean>, VersionCheckerAgent> versionCheckerFactory,
    final int maxStatementRetries,
    final ServiceContext serviceContext,
    final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
    final ConnectClientFactory connectClientFactory,
    final Vertx vertx,
    final KsqlClient sharedClient,
    final DefaultServiceContextFactory defaultServiceContextFactory,
    final UserServiceContextFactory userServiceContextFactory,
    final MetricCollectors metricCollectors) {
final String ksqlInstallDir = restConfig.getString(KsqlRestConfig.INSTALL_DIR_CONFIG);
final KsqlConfig ksqlConfig = new KsqlConfig(restConfig.getKsqlConfigProperties());
final ProcessingLogConfig processingLogConfig = new ProcessingLogConfig(restConfig.getOriginals());
final ProcessingLogContext processingLogContext = ProcessingLogContext.create(processingLogConfig);
final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry();
if (restConfig.getBoolean(KsqlRestConfig.KSQL_SERVER_ENABLE_UNCAUGHT_EXCEPTION_HANDLER)) {
Thread.setDefaultUncaughtExceptionHandler(new KsqlUncaughtExceptionHandler(LogManager::shutdown));
}
final SpecificQueryIdGenerator specificQueryIdGenerator = new SpecificQueryIdGenerator();
final String stateDir = ksqlConfig.getKsqlStreamConfigProps()
    .getOrDefault(
        StreamsConfig.STATE_DIR_CONFIG,
        StreamsConfig.configDef().defaultValues().get(StreamsConfig.STATE_DIR_CONFIG))
    .toString();
final ServiceInfo serviceInfo = ServiceInfo.create(ksqlConfig, metricsPrefix);
final Map<String, String> metricsTags = ImmutableMap.<String, String>builder().putAll(serviceInfo.customMetricsTags()).put(KsqlConstants.KSQL_SERVICE_ID_METRICS_TAG, serviceInfo.serviceId()).build();
StorageUtilizationMetricsReporter.configureShared(new File(stateDir), metricCollectors.getMetrics(), ksqlConfig.getStringAsMap(KsqlConfig.KSQL_CUSTOM_METRICS_TAGS));
final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setNameFormat("ksql-csu-metrics-reporter-%d").build());
final ScheduledExecutorService leakedResourcesReporter = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setNameFormat("ksql-leaked-resources-metrics-reporter-%d").build());
final KsqlEngine ksqlEngine = new KsqlEngine(serviceContext, processingLogContext, functionRegistry, serviceInfo, specificQueryIdGenerator, new KsqlConfig(restConfig.getKsqlConfigProperties()), Collections.emptyList(), metricCollectors);
final PersistentQuerySaturationMetrics saturation = new PersistentQuerySaturationMetrics(ksqlEngine, new JmxDataPointsReporter(metricCollectors.getMetrics(), "ksqldb_utilization", Duration.ofMinutes(1)), Duration.ofMinutes(5), Duration.ofSeconds(30), ksqlConfig.getStringAsMap(KsqlConfig.KSQL_CUSTOM_METRICS_TAGS));
executorService.scheduleAtFixedRate(saturation, 0, Duration.ofMinutes(1).toMillis(), TimeUnit.MILLISECONDS);
final int transientQueryCleanupServicePeriod = ksqlConfig.getInt(KsqlConfig.KSQL_TRANSIENT_QUERY_CLEANUP_SERVICE_PERIOD_SECONDS);
final LeakedResourcesMetrics leaked = new LeakedResourcesMetrics(ksqlEngine, new JmxDataPointsReporter(metricCollectors.getMetrics(), ReservedInternalTopics.KSQL_INTERNAL_TOPIC_PREFIX + ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG) + ".leaked_resources_metrics", Duration.ofSeconds(transientQueryCleanupServicePeriod)), ksqlConfig.getStringAsMap(KsqlConfig.KSQL_CUSTOM_METRICS_TAGS));
leakedResourcesReporter.scheduleAtFixedRate(leaked, 0, transientQueryCleanupServicePeriod, TimeUnit.SECONDS);
UserFunctionLoader.newInstance(ksqlConfig, functionRegistry, ksqlInstallDir, metricCollectors.getMetrics()).load();
final String commandTopicName = ReservedInternalTopics.commandTopic(ksqlConfig);
final Admin internalAdmin = createCommandTopicAdminClient(restConfig, ksqlConfig);
final KafkaTopicClient internalTopicClient = new KafkaTopicClientImpl(() -> internalAdmin);
final CommandStore commandStore = CommandStore.Factory.create(
    ksqlConfig,
    commandTopicName,
    Duration.ofMillis(restConfig.getLong(DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG)),
    ksqlConfig.addConfluentMetricsContextConfigsKafka(restConfig.getCommandConsumerProperties()),
    ksqlConfig.addConfluentMetricsContextConfigsKafka(restConfig.getCommandProducerProperties()),
    internalTopicClient);
final InteractiveStatementExecutor statementExecutor = new InteractiveStatementExecutor(serviceContext, ksqlEngine, specificQueryIdGenerator);
final StatusResource statusResource = new StatusResource(statementExecutor);
final VersionCheckerAgent versionChecker = versionCheckerFactory.apply(ksqlEngine::hasActiveQueries);
final ServerState serverState = new ServerState();
final KsqlSecurityExtension securityExtension = loadSecurityExtension(ksqlConfig);
final KsqlSecurityContextProvider ksqlSecurityContextProvider = new DefaultKsqlSecurityContextProvider(securityExtension, defaultServiceContextFactory, userServiceContextFactory, ksqlConfig, schemaRegistryClientFactory, connectClientFactory, sharedClient);
final Optional<AuthenticationPlugin> securityHandlerPlugin = loadAuthenticationPlugin(restConfig);
final Optional<KsqlAuthorizationValidator> authorizationValidator = KsqlAuthorizationValidatorFactory.create(ksqlConfig, serviceContext, securityExtension.getAuthorizationProvider());
final Errors errorHandler = new Errors(restConfig.getConfiguredInstance(KsqlRestConfig.KSQL_SERVER_ERROR_MESSAGES, ErrorMessages.class));
final ConnectServerErrors connectErrorHandler = loadConnectErrorHandler(ksqlConfig);
final Optional<LagReportingAgent> lagReportingAgent = initializeLagReportingAgent(restConfig, ksqlEngine, serviceContext);
final Optional<HeartbeatAgent> heartbeatAgent = initializeHeartbeatAgent(restConfig, ksqlEngine, serviceContext, lagReportingAgent);
final RoutingFilterFactory routingFilterFactory = initializeRoutingFilterFactory(ksqlConfig, heartbeatAgent, lagReportingAgent);
final RateLimiter pullQueryRateLimiter = new RateLimiter(ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PULL_MAX_QPS_CONFIG), "pull", metricCollectors.getMetrics(), metricsTags);
final ConcurrencyLimiter pullQueryConcurrencyLimiter = new ConcurrencyLimiter(ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PULL_MAX_CONCURRENT_REQUESTS_CONFIG), "pull", metricCollectors.getMetrics(), metricsTags);
final SlidingWindowRateLimiter pullBandRateLimiter = new SlidingWindowRateLimiter(ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PULL_MAX_HOURLY_BANDWIDTH_MEGABYTES_CONFIG), NUM_MILLISECONDS_IN_HOUR, "pull", metricCollectors.getMetrics(), metricsTags);
final SlidingWindowRateLimiter scalablePushBandRateLimiter = new SlidingWindowRateLimiter(ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PUSH_V2_MAX_HOURLY_BANDWIDTH_MEGABYTES_CONFIG), NUM_MILLISECONDS_IN_HOUR, "push", metricCollectors.getMetrics(), metricsTags);
final DenyListPropertyValidator denyListPropertyValidator = new DenyListPropertyValidator(ksqlConfig.getList(KsqlConfig.KSQL_PROPERTIES_OVERRIDES_DENYLIST));
final Optional<PullQueryExecutorMetrics> pullQueryMetrics =
    ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_METRICS_ENABLED)
        ? Optional.of(new PullQueryExecutorMetrics(
            ksqlEngine.getServiceId(),
            ksqlConfig.getStringAsMap(KsqlConfig.KSQL_CUSTOM_METRICS_TAGS),
            Time.SYSTEM,
            metricCollectors.getMetrics()))
        : Optional.empty();
final Optional<ScalablePushQueryMetrics> scalablePushQueryMetrics =
    ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PUSH_V2_ENABLED)
        ? Optional.of(new ScalablePushQueryMetrics(
            ksqlEngine.getServiceId(),
            ksqlConfig.getStringAsMap(KsqlConfig.KSQL_CUSTOM_METRICS_TAGS),
            Time.SYSTEM,
            metricCollectors.getMetrics()))
        : Optional.empty();
final HARouting pullQueryRouting = new HARouting(routingFilterFactory, pullQueryMetrics, ksqlConfig);
final PushRouting pushQueryRouting = new PushRouting();
final Optional<LocalCommands> localCommands = createLocalCommands(restConfig, ksqlEngine);
final QueryExecutor queryExecutor = new QueryExecutor(ksqlEngine, restConfig, ksqlConfig, pullQueryMetrics, scalablePushQueryMetrics, pullQueryRateLimiter, pullQueryConcurrencyLimiter, pullBandRateLimiter, scalablePushBandRateLimiter, pullQueryRouting, pushQueryRouting, localCommands);
final StreamedQueryResource streamedQueryResource = new StreamedQueryResource(
    ksqlEngine,
    restConfig,
    commandStore,
    Duration.ofMillis(restConfig.getLong(KsqlRestConfig.STREAMED_QUERY_DISCONNECT_CHECK_MS_CONFIG)),
    Duration.ofMillis(restConfig.getLong(DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG)),
    versionChecker::updateLastRequestTime,
    authorizationValidator,
    errorHandler,
    denyListPropertyValidator,
    queryExecutor);
final List<String> managedTopics = new LinkedList<>();
managedTopics.add(commandTopicName);
if (processingLogConfig.getBoolean(ProcessingLogConfig.TOPIC_AUTO_CREATE)) {
managedTopics.add(ProcessingLogServerUtils.getTopicName(processingLogConfig, ksqlConfig));
}
final CommandRunner commandRunner = new CommandRunner(
    statementExecutor,
    commandStore,
    maxStatementRetries,
    new ClusterTerminator(ksqlEngine, serviceContext, managedTopics),
    serverState,
    ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
    Duration.ofMillis(restConfig.getLong(KsqlRestConfig.KSQL_COMMAND_RUNNER_BLOCKED_THRESHHOLD_ERROR_MS)),
    metricsPrefix,
    InternalTopicSerdes.deserializer(Command.class),
    errorHandler,
    internalTopicClient,
    commandTopicName,
    metricCollectors.getMetrics());
final KsqlResource ksqlResource = new KsqlResource(ksqlEngine, commandRunner, Duration.ofMillis(restConfig.getLong(DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT_MS_CONFIG)), versionChecker::updateLastRequestTime, authorizationValidator, errorHandler, connectErrorHandler, denyListPropertyValidator);
final List<KsqlServerPrecondition> preconditions = restConfig.getConfiguredInstances(KsqlRestConfig.KSQL_SERVER_PRECONDITIONS, KsqlServerPrecondition.class);
final List<KsqlConfigurable> configurables = ImmutableList.of(ksqlResource, streamedQueryResource, statementExecutor);
final Consumer<KsqlConfig> rocksDBConfigSetterHandler = RocksDBConfigSetterHandler::maybeConfigureRocksDBConfigSetter;
return new KsqlRestApplication(
    serviceContext,
    ksqlEngine,
    ksqlConfig,
    restConfig,
    commandRunner,
    commandStore,
    statusResource,
    streamedQueryResource,
    ksqlResource,
    versionChecker,
    ksqlSecurityContextProvider,
    securityExtension,
    securityHandlerPlugin,
    serverState,
    processingLogContext,
    preconditions,
    configurables,
    rocksDBConfigSetterHandler,
    heartbeatAgent,
    lagReportingAgent,
    vertx,
    denyListPropertyValidator,
    pullQueryMetrics,
    scalablePushQueryMetrics,
    localCommands,
    queryExecutor,
    metricCollectors,
    internalTopicClient,
    internalAdmin);
}
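For context, the internal topic client used by the command store and command runner above is a thin wrapper over a Kafka Admin client. A minimal sketch of that wiring in isolation; the class name and bootstrap address are placeholders, while KafkaTopicClientImpl taking a Supplier<Admin> mirrors the call in buildApplication.

import io.confluent.ksql.services.KafkaTopicClient;
import io.confluent.ksql.services.KafkaTopicClientImpl;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

final class InternalTopicClientWiring {

  // Builds a topic client backed by a dedicated Admin client, mirroring
  // createCommandTopicAdminClient + new KafkaTopicClientImpl(() -> internalAdmin) above.
  static KafkaTopicClient create(final String bootstrapServers) {
    final Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    final Admin internalAdmin = Admin.create(props);
    return new KafkaTopicClientImpl(() -> internalAdmin);
  }
}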