Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc): class QueryRegistryImplTest, method givenCreate.
/**
 * Test fixture: wires up mocked persistent-query metadata (both the dedicated-runtime
 * and shared-runtime variants) behind the query builder, then creates the query via
 * the registry under test and returns the resulting metadata.
 *
 * @param registry the registry under test
 * @param id the query id to create
 * @param source the name of the query's source
 * @param sink the name of the query's sink, if any
 * @param persistentQueryType the type of persistent query to create
 * @return the metadata returned by {@code createOrReplacePersistentQuery}
 */
private PersistentQueryMetadata givenCreate(final QueryRegistry registry, final String id, final String source, final Optional<String> sink, KsqlConstants.PersistentQueryType persistentQueryType) {
final QueryId queryId = new QueryId(id);
final PersistentQueryMetadata query = mock(PersistentQueryMetadataImpl.class);
final PersistentQueryMetadata newQuery = mock(BinPackedPersistentQueryMetadataImpl.class);
final DataSource sinkSource = mock(DataSource.class);
final ExecutionStep physicalPlan = mock(ExecutionStep.class);
// Only stub sink-related accessors when the query actually has a sink.
sink.ifPresent(s -> {
when(sinkSource.getName()).thenReturn(SourceName.of(s));
when(query.getSinkName()).thenReturn(Optional.of(SourceName.of(s)));
when(newQuery.getSinkName()).thenReturn(Optional.of(SourceName.of(s)));
});
when(newQuery.getOverriddenProperties()).thenReturn(new HashMap<>());
when(newQuery.getQueryId()).thenReturn(queryId);
when(newQuery.getSink()).thenReturn(Optional.of(sinkSource));
when(newQuery.getSourceNames()).thenReturn(ImmutableSet.of(SourceName.of(source)));
when(newQuery.getPersistentQueryType()).thenReturn(persistentQueryType);
when(newQuery.getPhysicalPlan()).thenReturn(physicalPlan);
final SharedKafkaStreamsRuntime runtime = mock(SharedKafkaStreamsRuntimeImpl.class);
// Inject the mocked shared runtime into the (mocked) bin-packed query via reflection,
// since there is no setter for this field. A reflection failure must fail the test
// immediately rather than leaving the mock half-initialized.
try {
Field sharedRuntime = BinPackedPersistentQueryMetadataImpl.class.getDeclaredField("sharedKafkaStreamsRuntime");
sharedRuntime.setAccessible(true);
sharedRuntime.set(newQuery, runtime);
} catch (NoSuchFieldException | IllegalAccessException e) {
throw new AssertionError("Failed to inject sharedKafkaStreamsRuntime into mock", e);
}
when(runtime.getNewQueryErrorQueue()).thenReturn(mock(QueryMetadataImpl.TimeBoundedQueue.class));
when(query.getQueryId()).thenReturn(queryId);
when(query.getSink()).thenReturn(Optional.of(sinkSource));
when(query.getSourceNames()).thenReturn(ImmutableSet.of(SourceName.of(source)));
when(query.getPersistentQueryType()).thenReturn(persistentQueryType);
when(query.getPhysicalPlan()).thenReturn(physicalPlan);
// The builder hands back the appropriate mock depending on which runtime mode is used.
when(queryBuilder.buildPersistentQueryInSharedRuntime(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any())).thenReturn(newQuery);
when(queryBuilder.buildPersistentQueryInDedicatedRuntime(any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any())).thenReturn(query);
when(config.getConfig(true)).thenReturn(ksqlConfig);
return registry.createOrReplacePersistentQuery(config, serviceContext, logContext, metaStore, "sql", queryId, Optional.of(sinkSource), ImmutableSet.of(toSource(source)), mock(ExecutionStep.class), "plan-summary", persistentQueryType, sharedRuntimes ? Optional.of("applicationId") : Optional.empty());
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc): class TestExecutorUtil, method buildStreamsTopologyTestDrivers.
/**
 * Builds one {@link TopologyTestDriverContainer} per persistent query produced by the
 * test case: each container bundles a {@link TopologyTestDriver} for the query's
 * topology together with its resolved source topics and optional sink topic.
 */
static List<TopologyTestDriverContainer> buildStreamsTopologyTestDrivers(final TestCase testCase, final ServiceContext serviceContext, final KsqlEngine ksqlEngine, final KsqlConfig ksqlConfig, final StubKafkaService stubKafkaService, final TestExecutionListener listener) {
final KsqlConfig effectiveConfig = testCase.applyPersistedProperties(ksqlConfig);
final List<PersistentQueryAndSources> queries = doBuildQueries(testCase, serviceContext, ksqlEngine, effectiveConfig, stubKafkaService, listener);
final List<TopologyTestDriverContainer> containers = new ArrayList<>();
for (final PersistentQueryAndSources queryAndSources : queries) {
final PersistentQueryMetadata metadata = queryAndSources.getPersistentQueryMetadata();
// TopologyTestDriver requires java.util.Properties rather than a plain map.
final Properties driverProperties = new Properties();
driverProperties.putAll(metadata.getStreamsProperties());
final TopologyTestDriver driver = new TopologyTestDriver(metadata.getTopology(), driverProperties, Instant.EPOCH);
// Resolve each source topic from the stub broker, asserting it exists first.
final List<Topic> sourceTopics = queryAndSources.getSources().stream().map(src -> {
stubKafkaService.requireTopicExists(src.getKafkaTopicName());
return stubKafkaService.getTopic(src.getKafkaTopicName());
}).collect(Collectors.toList());
final Optional<Topic> sinkTopic = metadata.getSinkName().map(sinkName -> buildSinkTopic(ksqlEngine.getMetaStore().getSource(sinkName), stubKafkaService, serviceContext.getSchemaRegistryClient()));
testCase.setGeneratedTopologies(ImmutableList.of(metadata.getTopologyDescription()));
testCase.setGeneratedSchemas(metadata.getQuerySchemas().getLoggerSchemaInfo());
containers.add(TopologyTestDriverContainer.of(driver, sourceTopics, sinkTopic));
}
return containers;
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc): class InsertsStreamEndpoint, method createInsertsSubscriber.
/**
 * Creates a subscriber that streams inserted rows into the given target source.
 *
 * <p>Must be invoked on a Vert.x worker thread. Rejects the request if the server
 * has INSERT INTO ... VALUES disabled, or if the target name is not a valid
 * identifier or does not resolve to a known data source.
 *
 * @param caseInsensitiveTarget raw (possibly unquoted) target source name
 * @param properties insert properties supplied by the client
 * @param acksSubscriber subscriber that receives per-row acks
 * @param context the Vert.x context to run on
 * @param workerExecutor executor for blocking work
 * @param serviceContext per-request service context
 * @return the subscriber that consumes the incoming rows
 * @throws KsqlApiException if inserts are disabled or the target is invalid/unknown
 */
public InsertsStreamSubscriber createInsertsSubscriber(final String caseInsensitiveTarget, final JsonObject properties, final Subscriber<InsertResult> acksSubscriber, final Context context, final WorkerExecutor workerExecutor, final ServiceContext serviceContext) {
VertxUtils.checkIsWorker();
if (!ksqlConfig.getBoolean(KsqlConfig.KSQL_INSERT_INTO_VALUES_ENABLED)) {
// Build the message from the config constant so it cannot drift from the actual key.
throw new KsqlApiException("The server has disabled INSERT INTO ... VALUES functionality. " + "To enable it, restart your ksqlDB server " + "with '" + KsqlConfig.KSQL_INSERT_INTO_VALUES_ENABLED + "'=true", ERROR_CODE_BAD_REQUEST);
}
final String target;
try {
// Handles case-normalization and quoted identifiers.
target = Identifiers.getIdentifierText(caseInsensitiveTarget);
} catch (IllegalArgumentException e) {
throw new KsqlApiException("Invalid target name: " + e.getMessage(), ERROR_CODE_BAD_STATEMENT);
}
final DataSource dataSource = getDataSource(ksqlEngine.getMetaStore(), SourceName.of(target));
return InsertsSubscriber.createInsertsSubscriber(serviceContext, properties, dataSource, ksqlConfig, context, acksSubscriber, workerExecutor);
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc): class TopicDeleteInjector, method checkTopicRefs.
/**
 * Guards topic deletion: refuses to delete {@code source}'s Kafka topic if any
 * other registered data source still reads from or writes to the same topic.
 *
 * @param source the data source whose backing topic is about to be deleted
 */
private void checkTopicRefs(final DataSource source) {
final String topic = source.getKafkaTopicName();
final SourceName self = source.getName();
final Map<SourceName, DataSource> allSources = metastore.getAllDataSources();
// Collect every OTHER source sharing this topic, sorted for a stable message.
final String otherUsers = allSources.values().stream()
.filter(other -> other.getKafkaTopicName().equals(topic) && !self.equals(other.getName()))
.map(other -> other.getName().text())
.sorted()
.collect(Collectors.joining(", "));
if (!otherUsers.isEmpty()) {
throw new RuntimeException(String.format("Refusing to delete topic. Found other data sources (%s) using topic %s", otherUsers, topic));
}
}
Example usage of io.confluent.ksql.metastore.model.DataSource in the ksql project (confluentinc): class JsonFormatTest, method readNormalResults.
/**
 * Reads the expected number of unique rows from {@code resultTopic}, deserializing
 * them with the physical schema of the stream under test.
 *
 * @param resultTopic topic to read results from
 * @param expectedNumMessages number of unique messages expected to be available
 * @return the unique key/row pairs read from the topic
 */
private Map<GenericKey, GenericRow> readNormalResults(final String resultTopic, final int expectedNumMessages) {
// Resolve the stream's physical schema so the harness can deserialize key and value.
final DataSource dataSource = metaStore.getSource(SourceName.of(streamName));
final PhysicalSchema schema = PhysicalSchema.from(
dataSource.getSchema(),
dataSource.getKsqlTopic().getKeyFormat().getFeatures(),
dataSource.getKsqlTopic().getValueFormat().getFeatures());
return TEST_HARNESS.verifyAvailableUniqueRows(resultTopic, expectedNumMessages, KAFKA, JSON, schema);
}
Aggregations