Use of io.confluent.ksql.metastore.model.DataSource in project ksql by confluentinc.
Class QueryBuilder, method buildPersistentQueryInSharedRuntime.
@SuppressWarnings("ParameterNumber")
PersistentQueryMetadata buildPersistentQueryInSharedRuntime(
    final KsqlConfig ksqlConfig,
    final KsqlConstants.PersistentQueryType persistentQueryType,
    final String statementText,
    final QueryId queryId,
    final Optional<DataSource> sinkDataSource,
    final Set<DataSource> sources,
    final ExecutionStep<?> physicalPlan,
    final String planSummary,
    final QueryMetadata.Listener listener,
    final Supplier<List<PersistentQueryMetadata>> allPersistentQueries,
    final String applicationId,
    final MetricCollectors metricCollectors) {
  final SharedKafkaStreamsRuntime sharedKafkaStreamsRuntime = getKafkaStreamsInstance(
      applicationId,
      sources.stream().map(DataSource::getName).collect(Collectors.toSet()),
      queryId,
      metricCollectors);
  final Map<String, Object> queryOverrides = sharedKafkaStreamsRuntime.getStreamProperties();

  final LogicalSchema logicalSchema;
  final KeyFormat keyFormat;
  final ValueFormat valueFormat;
  final KsqlTopic ksqlTopic;
  switch (persistentQueryType) {
    // CREATE_SOURCE does not have a sink, so the schema is obtained from the query source
    case CREATE_SOURCE:
      final DataSource dataSource = Iterables.getOnlyElement(sources);
      logicalSchema = dataSource.getSchema();
      keyFormat = dataSource.getKsqlTopic().getKeyFormat();
      valueFormat = dataSource.getKsqlTopic().getValueFormat();
      ksqlTopic = dataSource.getKsqlTopic();
      break;
    default:
      logicalSchema = sinkDataSource.get().getSchema();
      keyFormat = sinkDataSource.get().getKsqlTopic().getKeyFormat();
      valueFormat = sinkDataSource.get().getKsqlTopic().getValueFormat();
      ksqlTopic = sinkDataSource.get().getKsqlTopic();
      break;
  }

  final PhysicalSchema querySchema =
      PhysicalSchema.from(logicalSchema, keyFormat.getFeatures(), valueFormat.getFeatures());
  final NamedTopologyBuilder namedTopologyBuilder = sharedKafkaStreamsRuntime.getKafkaStreams()
      .newNamedTopologyBuilder(queryId.toString(), PropertiesUtil.asProperties(queryOverrides));
  final RuntimeBuildContext runtimeBuildContext = buildContext(applicationId, queryId, namedTopologyBuilder);
  final Object result = buildQueryImplementation(physicalPlan, runtimeBuildContext);
  final NamedTopology topology = namedTopologyBuilder.build();

  final Optional<MaterializationProviderBuilderFactory.MaterializationProviderBuilder>
      materializationProviderBuilder = getMaterializationInfo(result).map(info ->
          materializationProviderBuilderFactory.materializationProviderBuilder(
              info, querySchema, keyFormat, queryOverrides, applicationId, queryId.toString()));
  final Optional<ScalablePushRegistry> scalablePushRegistry = applyScalablePushProcessor(
      querySchema.logicalSchema(), result, allPersistentQueries, queryOverrides,
      applicationId, ksqlConfig, ksqlTopic, serviceContext);

  final BinPackedPersistentQueryMetadataImpl binPackedPersistentQueryMetadata =
      new BinPackedPersistentQueryMetadataImpl(
          persistentQueryType, statementText, querySchema,
          sources.stream().map(DataSource::getName).collect(Collectors.toSet()),
          planSummary, applicationId, topology, sharedKafkaStreamsRuntime,
          runtimeBuildContext.getSchemas(), config.getOverrides(), queryId,
          materializationProviderBuilder, physicalPlan,
          getUncaughtExceptionProcessingLogger(queryId), sinkDataSource, listener,
          queryOverrides, scalablePushRegistry,
          (streamsRuntime) ->
              getNamedTopology(streamsRuntime, queryId, applicationId, queryOverrides, physicalPlan));

  if (real) {
    return binPackedPersistentQueryMetadata;
  } else {
    return SandboxedBinPackedPersistentQueryMetadataImpl.of(binPackedPersistentQueryMetadata, listener);
  }
}
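For reference, a minimal, self-contained sketch of the DataSource accessors exercised by the CREATE_SOURCE branch above. The mocked source, the hand-built KsqlTopic, and the literal format names are illustrative assumptions, not ksql code:

  // Illustrative only: a mocked DataSource exposing a schema and a KsqlTopic,
  // read back exactly as the CREATE_SOURCE branch does.
  final KsqlTopic topic = new KsqlTopic(
      "users",
      KeyFormat.of(FormatInfo.of("KAFKA"), SerdeFeatures.of(), Optional.empty()),
      ValueFormat.of(FormatInfo.of("JSON"), SerdeFeatures.of()));
  final DataSource source = mock(DataSource.class);
  when(source.getSchema()).thenReturn(LogicalSchema.builder().build());
  when(source.getKsqlTopic()).thenReturn(topic);

  final LogicalSchema logicalSchema = source.getSchema();
  final KeyFormat keyFormat = source.getKsqlTopic().getKeyFormat();
  final ValueFormat valueFormat = source.getKsqlTopic().getValueFormat();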
Use of io.confluent.ksql.metastore.model.DataSource in project ksql by confluentinc.
Class KsqlAuthorizationValidatorImpl, method getCreateAsSelectSinkTopic.
private KsqlTopic getCreateAsSelectSinkTopic(final MetaStore metaStore, final CreateAsSelect createAsSelect) {
  final CreateSourceAsProperties properties = createAsSelect.getProperties();
  final String sinkTopicName;
  final KeyFormat sinkKeyFormat;
  final ValueFormat sinkValueFormat;

  if (!properties.getKafkaTopic().isPresent()) {
    final DataSource dataSource = metaStore.getSource(createAsSelect.getName());
    if (dataSource != null) {
      sinkTopicName = dataSource.getKafkaTopicName();
      sinkKeyFormat = dataSource.getKsqlTopic().getKeyFormat();
      sinkValueFormat = dataSource.getKsqlTopic().getValueFormat();
    } else {
      throw new KsqlException("Cannot validate for topic access from an unknown stream/table: "
          + createAsSelect.getName());
    }
  } else {
    sinkTopicName = properties.getKafkaTopic().get();

    // If no format is specified for the sink topic, then use the format from the primary
    // source topic.
    final SourceTopicsExtractor extractor = new SourceTopicsExtractor(metaStore);
    extractor.process(createAsSelect.getQuery(), null);
    final KsqlTopic primaryKsqlTopic = extractor.getPrimarySourceTopic();

    final Optional<Format> keyFormat = properties.getKeyFormat()
        .map(formatName -> FormatFactory.fromName(formatName));
    final Optional<Format> valueFormat = properties.getValueFormat()
        .map(formatName -> FormatFactory.fromName(formatName));

    sinkKeyFormat = keyFormat
        .map(format -> KeyFormat.of(
            FormatInfo.of(format.name()),
            format.supportsFeature(SerdeFeature.SCHEMA_INFERENCE)
                ? SerdeFeatures.of(SerdeFeature.SCHEMA_INFERENCE)
                : SerdeFeatures.of(),
            Optional.empty()))
        .orElse(primaryKsqlTopic.getKeyFormat());

    sinkValueFormat = valueFormat
        .map(format -> ValueFormat.of(
            FormatInfo.of(format.name()),
            format.supportsFeature(SerdeFeature.SCHEMA_INFERENCE)
                ? SerdeFeatures.of(SerdeFeature.SCHEMA_INFERENCE)
                : SerdeFeatures.of()))
        .orElse(primaryKsqlTopic.getValueFormat());
  }

  return new KsqlTopic(sinkTopicName, sinkKeyFormat, sinkValueFormat);
}
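The key-format fallback above can be factored out for illustration. A minimal sketch, assuming an Optional<String> format property as in the snippet; the helper name resolveSinkKeyFormat is hypothetical and only demonstrates the map/orElse fallback to the primary source topic's format:

  // Hypothetical helper, mirroring the fallback above: an explicit key format property wins,
  // otherwise the primary source topic's key format is reused.
  private static KeyFormat resolveSinkKeyFormat(
      final Optional<String> keyFormatProperty,
      final KsqlTopic primarySourceTopic) {
    return keyFormatProperty
        .map(FormatFactory::fromName)
        .map(format -> KeyFormat.of(
            FormatInfo.of(format.name()),
            format.supportsFeature(SerdeFeature.SCHEMA_INFERENCE)
                ? SerdeFeatures.of(SerdeFeature.SCHEMA_INFERENCE)
                : SerdeFeatures.of(),
            Optional.empty()))
        .orElse(primarySourceTopic.getKeyFormat());
  }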
Use of io.confluent.ksql.metastore.model.DataSource in project ksql by confluentinc.
Class TopicDeleteInjectorTest, method shouldThrowIfTopicDoesNotExist.
@Test
public void shouldThrowIfTopicDoesNotExist() {
  // Given:
  final SourceName STREAM_1 = SourceName.of("stream1");
  final DataSource other1 = givenSource(STREAM_1, "topicName");
  when(metaStore.getSource(STREAM_1)).thenAnswer(inv -> other1);
  when(other1.getKafkaTopicName()).thenReturn("topicName");
  final ConfiguredStatement<DropStream> dropStatement = givenStatement(
      "DROP stream1 DELETE TOPIC;",
      new DropStream(SourceName.of("stream1"), true, true));
  doThrow(RuntimeException.class).when(topicClient).deleteTopics(ImmutableList.of("topicName"));

  // When:
  final Exception e = assertThrows(
      RuntimeException.class,
      () -> deleteInjector.inject(dropStatement));

  // Then:
  assertThat(e.getMessage(), containsString("Could not delete the corresponding kafka topic: topicName"));
}
Use of io.confluent.ksql.metastore.model.DataSource in project ksql by confluentinc.
Class TopicDeleteInjectorTest, method givenSource.
private static DataSource givenSource(final SourceName name, final String topicName) {
  final DataSource source = mock(DataSource.class);
  when(source.getName()).thenReturn(name);
  when(source.getKafkaTopicName()).thenReturn(topicName);
  return source;
}
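A short usage sketch of this helper, mirroring the stubbing pattern of the test above; the source name and topic are illustrative only:

  // Hypothetical usage of givenSource in a test body.
  final DataSource pageViews = givenSource(SourceName.of("PAGEVIEWS"), "pageviews_topic");
  when(metaStore.getSource(SourceName.of("PAGEVIEWS"))).thenReturn(pageViews);
  assertThat(pageViews.getKafkaTopicName(), is("pageviews_topic"));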
Use of io.confluent.ksql.metastore.model.DataSource in project ksql by confluentinc.
Class MetaStoreImpl, method putSource.
@Override
public void putSource(final DataSource dataSource, final boolean allowReplace) {
  final SourceInfo existing = dataSources.get(dataSource.getName());
  if (existing != null && !allowReplace) {
    final SourceName name = dataSource.getName();
    final String newType = dataSource.getDataSourceType().getKsqlType().toLowerCase();
    final String existingType = existing.source.getDataSourceType().getKsqlType().toLowerCase();
    throw new KsqlException(String.format(
        "Cannot add %s '%s': A %s with the same name already exists",
        newType, name.text(), existingType));
  } else if (existing != null) {
    existing.source.canUpgradeTo(dataSource).ifPresent(msg -> {
      throw new KsqlException("Cannot upgrade data source: " + msg);
    });
  }

  // Replace the dataSource if one exists, which may contain changes in the Schema, with
  // a copy of the previous source info
  dataSources.put(
      dataSource.getName(),
      (existing != null) ? existing.copyWith(dataSource) : new SourceInfo(dataSource));

  LOG.info("Source {} created on the metastore", dataSource.getName().text());

  // Re-build the DROP constraints if existing sources have references to this new source.
  // This logic makes sure that drop constraints are set back if sources were deleted during
  // the metastore restoration (See deleteSource()).
  dataSources.forEach((name, info) -> {
    info.references.forEach(ref -> {
      if (ref.equals(dataSource.getName())) {
        LOG.debug("Add a drop constraint reference back to source '{}' from source '{}'",
            dataSource.getName().text(), name.text());
        addConstraint(dataSource.getName(), name);
      }
    });
  });
}
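To illustrate the duplicate-name guard at the top of putSource, a minimal sketch. It assumes an already constructed MetaStoreImpl instance metaStore (constructor arguments omitted) plus Mockito and JUnit as used in the tests above; the mocked source and names are hypothetical:

  // Illustrative only: registering the same source name twice without allowReplace is rejected.
  final DataSource users = mock(DataSource.class);
  when(users.getName()).thenReturn(SourceName.of("USERS"));
  when(users.getDataSourceType()).thenReturn(DataSource.DataSourceType.KSTREAM);

  metaStore.putSource(users, false);   // first registration succeeds

  // Same name, allowReplace = false: putSource throws the "already exists" KsqlException.
  assertThrows(KsqlException.class, () -> metaStore.putSource(users, false));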