Use of io.confluent.ksql.parser.tree.UnsetProperty in project ksql by confluentinc.
The class PropertyOverrider, method unset:
public static void unset(
    final ConfiguredStatement<UnsetProperty> statement,
    final Map<String, Object> mutableProperties
) {
  final UnsetProperty unsetProperty = statement.getStatement();
  throwIfInvalidProperty(unsetProperty.getPropertyName(), statement.getStatementText());
  mutableProperties.remove(unsetProperty.getPropertyName());
}
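To illustrate the effect in isolation, here is a minimal sketch; the statement parsing and ConfiguredStatement construction are elided, and the property name is only an example. The UNSET path above boils down to a map removal; the matching SET path (not shown here) presumably performs the corresponding put after validation:

import java.util.HashMap;
import java.util.Map;

public final class OverrideSketch {
  public static void main(final String[] args) {
    final Map<String, Object> mutableProperties = new HashMap<>();

    // Roughly what a SET 'auto.offset.reset' = 'earliest'; override
    // amounts to once validated (assumption, mirroring unset above).
    mutableProperties.put("auto.offset.reset", "earliest");

    // What unset does for UNSET 'auto.offset.reset'; after this the
    // server-level default applies again.
    mutableProperties.remove("auto.offset.reset");

    System.out.println(mutableProperties.isEmpty()); // true
  }
}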
Use of io.confluent.ksql.parser.tree.UnsetProperty in project ksql by confluentinc.
The class KsqlTesterTest, method execute:
@SuppressWarnings("unchecked")
private void execute(final ParsedStatement parsedStatement) {
  final PreparedStatement<?> engineStatement = engine.prepare(parsedStatement);
  final ConfiguredStatement<?> configured = ConfiguredStatement.of(
      engineStatement, SessionConfig.of(config, overrides));

  createTopics(engineStatement);

  // Dispatch on statement type: INSERT VALUES pipes rows into the test
  // driver, while SET/UNSET mutate the session overrides in place.
  if (engineStatement.getStatement() instanceof InsertValues) {
    pipeInput((ConfiguredStatement<InsertValues>) configured);
    return;
  } else if (engineStatement.getStatement() instanceof SetProperty) {
    PropertyOverrider.set((ConfiguredStatement<SetProperty>) configured, overrides);
    return;
  } else if (engineStatement.getStatement() instanceof UnsetProperty) {
    PropertyOverrider.unset((ConfiguredStatement<UnsetProperty>) configured, overrides);
    return;
  }

  final ConfiguredStatement<?> injected = formatInjector.inject(configured);
  final ExecuteResult result = engine.execute(serviceContext, injected);

  // No query was started, so this was a DDL statement; nothing to run.
  if (!result.getQuery().isPresent()) {
    return;
  }

  final PersistentQueryMetadata query = (PersistentQueryMetadata) result.getQuery().get();
  final Topology topology = query.getTopology();
  final Properties properties = new Properties();
  properties.putAll(query.getStreamsProperties());
  properties.put(StreamsConfig.STATE_DIR_CONFIG, tmpFolder.getRoot().getAbsolutePath());

  final TopologyTestDriver driver = new TopologyTestDriver(topology, properties);

  final List<TopicInfo> inputTopics = query.getSourceNames()
      .stream()
      .map(sn -> engine.getMetaStore().getSource(sn))
      .map(ds -> new TopicInfo(ds.getKafkaTopicName(), keySerde(ds), valueSerde(ds)))
      .collect(Collectors.toList());

  // The sink may be absent for source tables. Once source-table query
  // execution is supported, we will need a condition here to skip
  // creating the output topic info.
  final DataSource output = engine.getMetaStore().getSource(query.getSinkName().get());
  final TopicInfo outputInfo = new TopicInfo(
      output.getKafkaTopicName(), keySerde(output), valueSerde(output));

  driverPipeline.addDriver(driver, inputTopics, outputInfo);
  drivers.put(query.getQueryId(), new DriverAndProperties(driver, properties));
}
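For context, this is roughly how a TopologyTestDriver run looks in isolation. The topology below is a stand-in (the real test gets its topology from query.getTopology()), and the topic names, serdes, and application id are placeholder assumptions:

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public final class DriverSketch {
  public static void main(final String[] args) {
    // Stand-in topology; the test above obtains its topology from the engine.
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
        .mapValues(v -> v.toUpperCase())
        .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
    final Topology topology = builder.build();

    // TopologyTestDriver needs these configs even though no broker is contacted.
    final Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "driver-sketch");
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:9092");

    try (TopologyTestDriver driver = new TopologyTestDriver(topology, properties)) {
      final TestInputTopic<String, String> in = driver.createInputTopic(
          "input-topic", Serdes.String().serializer(), Serdes.String().serializer());
      final TestOutputTopic<String, String> out = driver.createOutputTopic(
          "output-topic", Serdes.String().deserializer(), Serdes.String().deserializer());

      // Pipe a record in and read the transformed record back out.
      in.pipeInput("k", "hello");
      System.out.println(out.readKeyValue()); // KeyValue(k, HELLO)
    }
  }
}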