Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc):
class SourceDescriptionFactoryTest, method shouldReturnEmptyTimestampColumn.
@Test
public void shouldReturnEmptyTimestampColumn() {
  // Given: a source that is built without a timestamp column
  final String topicName = "kafka";
  final DataSource source = buildDataSource(topicName, Optional.empty());

  // When: a description is produced for that source
  final SourceDescription description = SourceDescriptionFactory.create(
      source,
      true,
      Collections.emptyList(),
      Collections.emptyList(),
      Optional.empty(),
      Collections.emptyList(),
      Collections.emptyList(),
      new MetricCollectors());

  // Then: the timestamp field of the description is the empty string
  assertThat(description.getTimestamp(), is(""));
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc):
class SourceDescriptionFactoryTest, method shouldReturnSourceConstraints.
@Test
public void shouldReturnSourceConstraints() {
  // Given: any source
  final String topicName = "kafka";
  final DataSource source = buildDataSource(topicName, Optional.empty());

  // When: a description is produced with two source constraints supplied
  final SourceDescription description = SourceDescriptionFactory.create(
      source,
      true,
      Collections.emptyList(),
      Collections.emptyList(),
      Optional.empty(),
      Collections.emptyList(),
      ImmutableList.of("s1", "s2"),
      new MetricCollectors());

  // Then: both constraints are reported back on the description
  assertThat(description.getSourceConstraints(), hasItems("s1", "s2"));
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc):
class LogicalPlannerTest, method shouldCreatePlanWithTableAsSource.
@Test
public void shouldCreatePlanWithTableAsSource() {
  // Build a plan for a simple limited select over the TEST2 table.
  final PlanNode planNode = buildLogicalPlan("select col0 from TEST2 EMIT CHANGES limit 5;");
  assertThat(planNode.getSources().size(), equalTo(1));

  // Walk three levels down the plan tree to reach the data-source leaf node.
  final PlanNode levelOne = planNode.getSources().get(0);
  final PlanNode levelTwo = levelOne.getSources().get(0);
  final DataSourceNode leaf = (DataSourceNode) levelTwo.getSources().get(0);

  // The leaf must be the TEST2 table.
  final DataSource dataSource = leaf.getDataSource();
  assertThat(dataSource.getDataSourceType(), equalTo(DataSourceType.KTABLE));
  assertThat(dataSource.getName(), equalTo(SourceName.of("TEST2")));
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc):
class ClusterTerminator, method subjectNames.
/**
 * Collects the Schema Registry subject names (key and/or value) for every
 * source whose serialization format supports schema inference.
 *
 * @param sources the data sources to inspect
 * @return the set of SR subject names referenced by the given sources
 */
private static Set<String> subjectNames(final List<DataSource> sources) {
  final Set<String> subjects = new HashSet<>();
  for (final DataSource source : sources) {
    final String topicName = source.getKafkaTopicName();
    final boolean keyUsesInference = FormatFactory
        .fromName(source.getKsqlTopic().getKeyFormat().getFormat())
        .supportsFeature(SerdeFeature.SCHEMA_INFERENCE);
    if (keyUsesInference) {
      subjects.add(KsqlConstants.getSRSubject(topicName, true));
    }
    final boolean valueUsesInference = FormatFactory
        .fromName(source.getKsqlTopic().getValueFormat().getFormat())
        .supportsFeature(SerdeFeature.SCHEMA_INFERENCE);
    if (valueUsesInference) {
      subjects.add(KsqlConstants.getSRSubject(topicName, false));
    }
  }
  return subjects;
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc):
class QueryExecutor, method handleQuery.
/**
 * Routes an incoming query statement to the appropriate execution path:
 * a table pull query, a stream pull query, a scalable push query, or a
 * plain (transient) push query.
 *
 * @param serviceContext the per-request service context
 * @param statement the prepared query statement to execute
 * @param isInternalRequest whether the request originated from another node (if known)
 * @param metricsCallbackHolder receives a metrics callback so metrics fire even on error
 * @param configOverrides per-request config overrides applied on top of {@code ksqlConfig}
 * @param requestProperties request-scoped properties (e.g. consistency offset vector)
 * @param context the Vert.x-style request context passed through to push handling
 * @param excludeTombstones whether tombstone rows are excluded from push-query output
 * @return a holder wrapping the metadata of whichever query type was started
 * @throws KsqlStatementException if pull queries are disabled or the source type is unexpected
 */
private QueryMetadataHolder handleQuery(final ServiceContext serviceContext, final PreparedStatement<Query> statement, final Optional<Boolean> isInternalRequest, final MetricsCallbackHolder metricsCallbackHolder, final Map<String, Object> configOverrides, final Map<String, Object> requestProperties, final Context context, final boolean excludeTombstones) {
if (statement.getStatement().isPullQuery()) {
// Analyze the query without creating an output topic to discover its source.
final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(statement.getStatement(), statement.getStatementText(), configOverrides);
final DataSource dataSource = analysis.getFrom().getDataSource();
final DataSource.DataSourceType dataSourceType = dataSource.getDataSourceType();
// Reject pull queries outright when the feature flag is off.
if (!ksqlConfig.getBoolean(KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG)) {
throw new KsqlStatementException("Pull queries are disabled." + PullQueryValidator.PULL_QUERY_SYNTAX_HELP + System.lineSeparator() + "Please set " + KsqlConfig.KSQL_PULL_QUERIES_ENABLE_CONFIG + "=true to enable " + "this feature." + System.lineSeparator(), statement.getStatementText());
}
// Optionally deserialize the client-supplied consistency offset vector.
Optional<ConsistencyOffsetVector> consistencyOffsetVector = Optional.empty();
if (ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR_ENABLED) && requestProperties.containsKey(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR)) {
final String serializedCV = (String) requestProperties.get(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_CONSISTENCY_OFFSET_VECTOR);
// serializedCV will be empty on the first request as the consistency vector is initialized
// at the server
consistencyOffsetVector = serializedCV != null && !serializedCV.equals("") ? Optional.of(ConsistencyOffsetVector.deserialize(serializedCV)) : Optional.of(ConsistencyOffsetVector.emptyVector());
}
// Dispatch on the source type: tables and streams use different pull paths.
switch(dataSourceType) {
case KTABLE:
{
// First thing, set the metrics callback so that it gets called, even if we hit an error
final AtomicReference<PullQueryResult> resultForMetrics = new AtomicReference<>(null);
metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullTableMetricsCallback(pullQueryMetrics, pullBandRateLimiter, resultForMetrics));
final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
return handleTablePullQuery(analysis, serviceContext, configured, requestProperties, isInternalRequest, pullBandRateLimiter, resultForMetrics, consistencyOffsetVector);
}
case KSTREAM:
{
// First thing, set the metrics callback so that it gets called, even if we hit an error
final AtomicReference<StreamPullQueryMetadata> resultForMetrics = new AtomicReference<>(null);
final AtomicReference<Decrementer> refDecrementer = new AtomicReference<>(null);
metricsCallbackHolder.setCallback(QueryMetricsUtil.initializePullStreamMetricsCallback(pullQueryMetrics, pullBandRateLimiter, analysis, resultForMetrics, refDecrementer));
final SessionConfig sessionConfig = SessionConfig.of(ksqlConfig, configOverrides);
final ConfiguredStatement<Query> configured = ConfiguredStatement.of(statement, sessionConfig);
return handleStreamPullQuery(analysis, serviceContext, configured, resultForMetrics, refDecrementer);
}
default:
// Defensive: the enum should only ever be KTABLE or KSTREAM here.
throw new KsqlStatementException("Unexpected data source type for pull query: " + dataSourceType, statement.getStatementText());
}
} else if (ScalablePushUtil.isScalablePushQuery(statement.getStatement(), ksqlEngine, ksqlConfig, configOverrides)) {
// First thing, set the metrics callback so that it gets called, even if we hit an error
final AtomicReference<ScalablePushQueryMetadata> resultForMetrics = new AtomicReference<>(null);
metricsCallbackHolder.setCallback(QueryMetricsUtil.initializeScalablePushMetricsCallback(scalablePushQueryMetrics, scalablePushBandRateLimiter, resultForMetrics));
final ImmutableAnalysis analysis = ksqlEngine.analyzeQueryWithNoOutputTopic(statement.getStatement(), statement.getStatementText(), configOverrides);
QueryLogger.info("Scalable push query created", statement.getStatementText());
return handleScalablePushQuery(analysis, serviceContext, statement, configOverrides, requestProperties, context, scalablePushBandRateLimiter, resultForMetrics);
} else {
// log validated statements for query anonymization
QueryLogger.info("Transient query created", statement.getStatementText());
return handlePushQuery(serviceContext, statement, configOverrides, excludeTombstones);
}
}
End of DataSource usage examples.