Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class TestExecutor, method create.
/**
 * Create instance.
 *
 * <p>If {@code validateResults} is {@code true} the test will fail if the results are not as
 * expected. This is the norm. If {@code false} the test will not fail if the results differ.
 * This is useful when re-writing historical plans.
 *
 * @param validateResults flag to indicate if results should be validated.
 * @param extensionDir optional extension directory.
 * @return the executor.
 */
public static TestExecutor create(
    final boolean validateResults,
    final Optional<String> extensionDir
) {
  final StubKafkaService kafkaService = StubKafkaService.create();
  final StubKafkaClientSupplier kafkaClientSupplier = new StubKafkaClientSupplier();
  final ServiceContext serviceContext = getServiceContext(kafkaClientSupplier);
  return new TestExecutor(
      kafkaService,
      serviceContext,
      getKsqlEngine(serviceContext, extensionDir),
      TestExecutorUtil::buildStreamsTopologyTestDrivers,
      validateResults);
}
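A caller would typically construct the executor and dispose of it when done. A minimal usage sketch, assuming the package path io.confluent.ksql.test.tools and a close() method on TestExecutor (neither is confirmed by this listing):

import io.confluent.ksql.test.tools.TestExecutor; // assumed package path
import java.util.Optional;

// Validate results (the normal mode), with no extension directory.
final TestExecutor executor = TestExecutor.create(true, Optional.empty());
try {
  // ... run test cases against the executor ...
} finally {
  executor.close(); // assumed cleanup hook
}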
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class KsqlTesterTest, method execute.
@SuppressWarnings("unchecked")
private void execute(final ParsedStatement parsedStatement) {
  final PreparedStatement<?> engineStatement = engine.prepare(parsedStatement);
  final ConfiguredStatement<?> configured =
      ConfiguredStatement.of(engineStatement, SessionConfig.of(config, overrides));
  createTopics(engineStatement);
  if (engineStatement.getStatement() instanceof InsertValues) {
    pipeInput((ConfiguredStatement<InsertValues>) configured);
    return;
  } else if (engineStatement.getStatement() instanceof SetProperty) {
    PropertyOverrider.set((ConfiguredStatement<SetProperty>) configured, overrides);
    return;
  } else if (engineStatement.getStatement() instanceof UnsetProperty) {
    PropertyOverrider.unset((ConfiguredStatement<UnsetProperty>) configured, overrides);
    return;
  }
  final ConfiguredStatement<?> injected = formatInjector.inject(configured);
  final ExecuteResult result = engine.execute(serviceContext, injected);
  // DDL statements produce no query; nothing more to do.
  if (!result.getQuery().isPresent()) {
    return;
  }
  final PersistentQueryMetadata query = (PersistentQueryMetadata) result.getQuery().get();
  final Topology topology = query.getTopology();
  final Properties properties = new Properties();
  properties.putAll(query.getStreamsProperties());
  properties.put(StreamsConfig.STATE_DIR_CONFIG, tmpFolder.getRoot().getAbsolutePath());
  final TopologyTestDriver driver = new TopologyTestDriver(topology, properties);
  final List<TopicInfo> inputTopics = query.getSourceNames().stream()
      .map(sn -> engine.getMetaStore().getSource(sn))
      .map(ds -> new TopicInfo(ds.getKafkaTopicName(), keySerde(ds), valueSerde(ds)))
      .collect(Collectors.toList());
  // The sink may be absent for source tables. Once source-table query execution
  // is supported, we would need a condition here to skip creating the output
  // topic info.
  final DataSource output = engine.getMetaStore().getSource(query.getSinkName().get());
  final TopicInfo outputInfo =
      new TopicInfo(output.getKafkaTopicName(), keySerde(output), valueSerde(output));
  driverPipeline.addDriver(driver, inputTopics, outputInfo);
  drivers.put(query.getQueryId(), new DriverAndProperties(driver, properties));
}
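The method above wraps each persistent query in Kafka Streams' TopologyTestDriver, which runs a topology synchronously in-process with no broker. A minimal, self-contained sketch of that underlying API follows; the topic names, serdes, and the trivial pass-through topology are illustrative, not taken from the test.

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

// Build a trivial pass-through topology: everything on "in" is forwarded to "out".
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("in", Consumed.with(Serdes.String(), Serdes.String()))
    .to("out", Produced.with(Serdes.String(), Serdes.String()));
final Topology topology = builder.build();

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "sketch");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "unused:9092"); // never contacted

try (TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
  final TestInputTopic<String, String> in = driver.createInputTopic(
      "in", Serdes.String().serializer(), Serdes.String().serializer());
  final TestOutputTopic<String, String> out = driver.createOutputTopic(
      "out", Serdes.String().deserializer(), Serdes.String().deserializer());
  in.pipeInput("k", "v");              // processed synchronously
  System.out.println(out.readValue()); // prints "v"
}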
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class EngineExecutor, method executeTablePullQuery.
/**
 * Evaluates a pull query by first analyzing it, then building the logical plan
 * and finally the physical plan. The execution is then done using the physical
 * plan in a pipelined manner.
 *
 * @param statement the pull query
 * @param routingOptions configuration parameters used for HA routing
 * @param pullQueryMetrics JMX metrics
 * @return the rows that are the result of evaluating the pull query
 */
PullQueryResult executeTablePullQuery(
    final ImmutableAnalysis analysis,
    final ConfiguredStatement<Query> statement,
    final HARouting routing,
    final RoutingOptions routingOptions,
    final QueryPlannerOptions queryPlannerOptions,
    final Optional<PullQueryExecutorMetrics> pullQueryMetrics,
    final boolean startImmediately,
    final Optional<ConsistencyOffsetVector> consistencyOffsetVector
) {
  if (!statement.getStatement().isPullQuery()) {
    throw new IllegalArgumentException("Executor can only handle pull queries");
  }
  final SessionConfig sessionConfig = statement.getSessionConfig();
  // If we ever change how many hops a request can do, we'll need to update this
  // for correct metrics.
  final RoutingNodeType routingNodeType = routingOptions.getIsSkipForwardRequest()
      ? RoutingNodeType.REMOTE_NODE
      : RoutingNodeType.SOURCE_NODE;
  PullPhysicalPlan plan = null;
  try {
    // Do not pass true to sessionConfig.getConfig! The copying is inefficient and
    // slows down pull query performance significantly. Instead use
    // QueryPlannerOptions, which checks overrides deliberately.
    final KsqlConfig ksqlConfig = sessionConfig.getConfig(false);
    final LogicalPlanNode logicalPlan =
        buildAndValidateLogicalPlan(statement, analysis, ksqlConfig, queryPlannerOptions, false);
    // This is a cancel signal that is used to stop both local operations and requests.
    final CompletableFuture<Void> shouldCancelRequests = new CompletableFuture<>();
    plan = buildPullPhysicalPlan(
        logicalPlan, analysis, queryPlannerOptions, shouldCancelRequests, consistencyOffsetVector);
    final PullPhysicalPlan physicalPlan = plan;
    final PullQueryQueue pullQueryQueue = new PullQueryQueue(analysis.getLimitClause());
    final PullQueryQueuePopulator populator = () -> routing.handlePullQuery(
        serviceContext,
        physicalPlan,
        statement,
        routingOptions,
        physicalPlan.getOutputSchema(),
        physicalPlan.getQueryId(),
        pullQueryQueue,
        shouldCancelRequests,
        consistencyOffsetVector);
    final PullQueryResult result = new PullQueryResult(
        physicalPlan.getOutputSchema(),
        populator,
        physicalPlan.getQueryId(),
        pullQueryQueue,
        pullQueryMetrics,
        physicalPlan.getSourceType(),
        physicalPlan.getPlanType(),
        routingNodeType,
        physicalPlan::getRowsReadFromDataSource,
        shouldCancelRequests,
        consistencyOffsetVector);
    if (startImmediately) {
      result.start();
    }
    return result;
  } catch (final Exception e) {
    if (plan == null) {
      pullQueryMetrics.ifPresent(m -> m.recordErrorRateForNoResult(1));
    } else {
      final PullPhysicalPlan physicalPlan = plan;
      pullQueryMetrics.ifPresent(metrics -> metrics.recordErrorRate(
          1, physicalPlan.getSourceType(), physicalPlan.getPlanType(), routingNodeType));
    }
    final String stmtLower = statement.getStatementText().toLowerCase(Locale.ROOT);
    final String messageLower = e.getMessage().toLowerCase(Locale.ROOT);
    final String stackLower = Throwables.getStackTraceAsString(e).toLowerCase(Locale.ROOT);
    // Avoid logging the raw error if the message or stack trace contains
    // the contents of the query.
    if (messageLower.contains(stmtLower) || stackLower.contains(stmtLower)) {
      final StackTraceElement loc =
          Iterables.getLast(Throwables.getCausalChain(e)).getStackTrace()[0];
      LOG.error("Failure to execute pull query {} {}, not logging the error message since it "
          + "contains the query string, which may contain sensitive information. If you "
          + "see this LOG message, please submit a GitHub ticket and we will scrub "
          + "the statement text from the error at {}",
          routingOptions.debugString(), queryPlannerOptions.debugString(), loc);
    } else {
      LOG.error("Failure to execute pull query. {} {}",
          routingOptions.debugString(), queryPlannerOptions.debugString(), e);
    }
    LOG.debug("Failed pull query text {}, {}", statement.getStatementText(), e);
    throw new KsqlStatementException(
        e.getMessage() == null
            ? "Server Error" + Arrays.toString(e.getStackTrace())
            : e.getMessage(),
        statement.getStatementText(),
        e);
  }
}
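One detail worth calling out above is the cancel signal: a single CompletableFuture<Void> is shared by local operators and forwarded requests, and completing it tells all of them to stop. A minimal sketch of that idiom follows; the worker loop and the println are illustrative.

import java.util.concurrent.CompletableFuture;

// One future acts as a broadcast cancel flag for any number of workers.
final CompletableFuture<Void> shouldCancel = new CompletableFuture<>();

final Runnable worker = () -> {
  while (!shouldCancel.isDone()) {
    // do a bounded chunk of work, then re-check the signal
  }
};
new Thread(worker).start();

// React immediately when cancellation is requested:
shouldCancel.thenRun(() -> System.out.println("cancel requested"));

// Elsewhere (e.g. on client disconnect or error), fire the signal exactly once:
shouldCancel.complete(null);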
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class StandaloneExecutorFunctionalTest, method setUp.
@SuppressWarnings("unchecked")
@Before
public void setUp() throws Exception {
  queryFile = TMP.newFile().toPath();
  final Map<String, Object> properties = ImmutableMap.<String, Object>builder()
      .putAll(KsqlConfigTestUtil.baseTestConfig())
      .put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, TEST_HARNESS.kafkaBootstrapServers())
      .put(KsqlConfig.SCHEMA_REGISTRY_URL_PROPERTY, "http://foo:8080")
      .build();
  final Function<KsqlConfig, ServiceContext> serviceContextFactory = config ->
      TestServiceContext.create(
          new KsqlConfig(properties),
          TEST_HARNESS.getServiceContext().getSchemaRegistryClientFactory());
  standalone = StandaloneExecutorFactory.create(
      (Map) properties,
      queryFile.toString(),
      ".",
      serviceContextFactory,
      KafkaConfigStore::new,
      activeQuerySupplier -> versionChecker,
      StandaloneExecutor::new,
      new MetricCollectors());
  s1 = KsqlIdentifierTestUtil.uniqueIdentifierName("S1");
  s2 = KsqlIdentifierTestUtil.uniqueIdentifierName("S2");
  t1 = KsqlIdentifierTestUtil.uniqueIdentifierName("T1");
}
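The notable pattern here is that StandaloneExecutorFactory.create takes a Function<KsqlConfig, ServiceContext> rather than constructing the service context itself, which is what lets this test inject TestServiceContext. A generic sketch of that factory-injection pattern, with all names illustrative:

import java.util.function.Function;

// The component depends on a factory function, not on a concrete service,
// so each caller decides what actually gets constructed.
final class Executor {
  interface Service { void start(); }
  static final class Config { }

  private final Service service;

  Executor(final Config config, final Function<Config, Service> serviceFactory) {
    this.service = serviceFactory.apply(config);
  }
}

// Production wiring might pass a real factory, e.g. RealService::new;
// a test passes a stub: new Executor(config, c -> () -> { /* no-op */ });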
Use of io.confluent.ksql.services.ServiceContext in project ksql by confluentinc.
The class DistributingExecutorTest, method shouldThrowServerExceptionIfServerServiceContextIsDeniedAuthorization.
@Test
public void shouldThrowServerExceptionIfServerServiceContextIsDeniedAuthorization() {
  // Given:
  final KsqlSecurityContext userSecurityContext = new KsqlSecurityContext(
      Optional.empty(), SandboxedServiceContext.create(TestServiceContext.create()));
  final PreparedStatement<Statement> preparedStatement =
      PreparedStatement.of("", new ListProperties(Optional.empty()));
  final ConfiguredStatement<Statement> configured = ConfiguredStatement.of(
      preparedStatement, SessionConfig.of(KSQL_CONFIG, ImmutableMap.of()));
  doNothing().when(authorizationValidator)
      .checkAuthorization(eq(userSecurityContext), any(), any());
  doThrow(KsqlTopicAuthorizationException.class).when(authorizationValidator)
      .checkAuthorization(
          ArgumentMatchers.argThat(
              securityContext -> securityContext.getServiceContext() == serviceContext),
          any(), any());
  // When:
  final Exception e = assertThrows(
      KsqlServerException.class,
      () -> distributor.execute(configured, executionContext, userSecurityContext));
  // Then:
  assertThat(e.getCause(), is(instanceOf(KsqlTopicAuthorizationException.class)));
}
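The key Mockito technique in this test is argThat, which lets the same mocked method behave differently depending on the argument it receives. A stripped-down sketch of that technique, with illustrative types:

import static org.mockito.ArgumentMatchers.argThat;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

interface Validator { void check(String principal); }

// Inside a test method:
final Validator validator = mock(Validator.class);
doThrow(new SecurityException("denied"))
    .when(validator)
    .check(argThat(principal -> "server".equals(principal)));

validator.check("alice");  // matcher does not fire: no exception
validator.check("server"); // matcher fires: throws SecurityException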