Use of io.confluent.ksql.metastore.MutableMetaStore in project ksql by confluentinc.
The class EngineExecutor, method sourceTablePlan:
@SuppressFBWarnings(value = "NP_NULL_PARAM_DEREF_NONVIRTUAL")
private KsqlPlan sourceTablePlan(final ConfiguredStatement<?> statement) {
  final CreateTable createTable = (CreateTable) statement.getStatement();
  final CreateTableCommand ddlCommand = (CreateTableCommand) engineContext.createDdlCommand(
      statement.getStatementText(), (ExecutableDdlStatement) statement.getStatement(), config);
  final Relation from = new AliasedRelation(new Table(createTable.getName()), createTable.getName());
  // Only VALUE or HEADER columns must be selected from the source table. When running a
  // pull query, the keys are added if selecting all columns.
  final Select select = new Select(createTable.getElements().stream()
      .filter(column -> !column.getConstraints().isKey() && !column.getConstraints().isPrimaryKey())
      .map(column -> new SingleColumn(
          new UnqualifiedColumnReferenceExp(column.getName()), Optional.of(column.getName())))
      .collect(Collectors.toList()));
  // The source table needs to keep emitting changes so that every new record is
  // materialized for pull query availability.
  final RefinementInfo refinementInfo = RefinementInfo.of(OutputRefinement.CHANGES);
  // This is a plan for a `select * from <source-table> emit changes` statement, without a
  // sink topic to write the results. The query exists only to materialize the source table.
  final Query query = new Query(
      Optional.empty(), select, from, Optional.empty(), Optional.empty(), Optional.empty(),
      Optional.empty(), Optional.empty(), Optional.of(refinementInfo), false, OptionalInt.empty());
  // The source table does not exist in the current metastore, so a temporary metastore that
  // contains only the source table is created here. This metastore is later used to create
  // the ExecutorPlans.
  final MutableMetaStore tempMetastore = new MetaStoreImpl(new InternalFunctionRegistry());
  final Formats formats = ddlCommand.getFormats();
  tempMetastore.putSource(
      new KsqlTable<>(
          statement.getStatementText(), createTable.getName(), ddlCommand.getSchema(),
          Optional.empty(), false,
          new KsqlTopic(
              ddlCommand.getTopicName(),
              KeyFormat.of(formats.getKeyFormat(), formats.getKeyFeatures(), Optional.empty()),
              ValueFormat.of(formats.getValueFormat(), formats.getValueFeatures())),
          true),
      false);
  final ExecutorPlans plans = planQuery(statement, query, Optional.empty(), Optional.empty(), tempMetastore);
  final KsqlBareOutputNode outputNode = (KsqlBareOutputNode) plans.logicalPlan.getNode().get();
  final QueryPlan queryPlan = new QueryPlan(
      getSourceNames(outputNode), Optional.empty(), plans.physicalPlan.getPhysicalPlan(),
      plans.physicalPlan.getQueryId(),
      getApplicationId(plans.physicalPlan.getQueryId(), getSourceNames(outputNode)));
  engineContext.createQueryValidator().validateQuery(
      config, plans.physicalPlan, engineContext.getQueryRegistry().getAllLiveQueries());
  return KsqlPlan.queryPlanCurrent(statement.getStatementText(), Optional.of(ddlCommand), queryPlan);
}
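The notable move here is the throwaway metastore: because the source table is not yet registered anywhere, the query is planned against a MutableMetaStore that contains only that one table. Below is a minimal sketch of the same pattern using only the calls shown above; the helper name and the `table` argument are illustrative, not part of ksql.

// Hypothetical helper illustrating the throwaway-metastore pattern; `table` stands in
// for a fully built KsqlTable such as the one constructed in sourceTablePlan above.
static MutableMetaStore metastoreWithOnly(final KsqlTable<?> table) {
  final MutableMetaStore tempMetastore = new MetaStoreImpl(new InternalFunctionRegistry());
  // allowReplace = false: fail rather than silently replace an existing source
  tempMetastore.putSource(table, false);
  return tempMetastore;
}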
Use of io.confluent.ksql.metastore.MutableMetaStore in project ksql by confluentinc.
The class TestExecutor, method getKsqlEngine:
static KsqlEngine getKsqlEngine(final ServiceContext serviceContext, final Optional<String> extensionDir) {
  final FunctionRegistry functionRegistry;
  if (extensionDir.isPresent()) {
    final MutableFunctionRegistry mutable = new InternalFunctionRegistry();
    UdfLoaderUtil.load(mutable, extensionDir.get());
    functionRegistry = mutable;
  } else {
    functionRegistry = TestFunctionRegistry.INSTANCE.get();
  }
  final MutableMetaStore metaStore = new MetaStoreImpl(functionRegistry);
  final MetricCollectors metricCollectors = new MetricCollectors();
  return new KsqlEngine(
      serviceContext,
      ProcessingLogContext.create(),
      "test_instance_",
      metaStore,
      (engine) -> new KsqlEngineMetrics("", engine, Collections.emptyMap(), Optional.empty(), metricCollectors),
      new SequentialQueryIdGenerator(),
      KsqlConfig.empty(),
      Collections.emptyList(),
      metricCollectors);
}
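A sketch of how a test might consume this helper, assuming a `serviceContext` built elsewhere in the test; the engine is closed afterwards, since KsqlEngine owns metrics and query resources.

// Illustrative test usage; `serviceContext` is a placeholder for the test's own setup.
final KsqlEngine engine = TestExecutor.getKsqlEngine(serviceContext, Optional.empty());
try {
  // A freshly built engine starts from the empty MetaStoreImpl created above.
  assertThat(engine.getMetaStore().getAllDataSources().isEmpty(), is(true));
} finally {
  engine.close();
}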
Use of io.confluent.ksql.metastore.MutableMetaStore in project ksql by confluentinc.
The class TestCaseBuilderUtil, method getAllTopics:
public static Collection<Topic> getAllTopics(
    final Collection<String> statements,
    final Collection<Topic> topics,
    final Collection<Record> outputs,
    final Collection<Record> inputs,
    final FunctionRegistry functionRegistry,
    final KsqlConfig ksqlConfig) {
  final Map<String, Topic> allTopics = new HashMap<>();
  // Add all topics from topic nodes to the map:
  topics.forEach(topic -> allTopics.put(topic.getName(), topic));
  // Infer topics from the statements if they were not added already:
  final MutableMetaStore metaStore = new MetaStoreImpl(functionRegistry);
  for (final String sql : statements) {
    final Topic topicFromStatement = createTopicFromStatement(sql, metaStore, ksqlConfig);
    if (topicFromStatement != null) {
      allTopics.computeIfPresent(topicFromStatement.getName(), (key, topic) -> {
        // Keep the explicitly declared schemas; fall back to the ones inferred from the statement.
        final Optional<ParsedSchema> keySchema = Optional.of(topic.getKeySchema())
            .filter(Optional::isPresent)
            .orElse(topicFromStatement.getKeySchema());
        final Optional<ParsedSchema> valueSchema = Optional.of(topic.getValueSchema())
            .filter(Optional::isPresent)
            .orElse(topicFromStatement.getValueSchema());
        return new Topic(topic.getName(), topic.getNumPartitions(), topic.getReplicas(),
            keySchema, valueSchema, topic.getKeyFeatures(), topic.getValueFeatures());
      });
      if (allTopics.containsKey(topicFromStatement.getName())) {
        // The topic already exists: just take the key/value serde features from the statement.
        final Topic existingTopic = allTopics.get(topicFromStatement.getName());
        allTopics.put(topicFromStatement.getName(), new Topic(
            existingTopic.getName(), existingTopic.getNumPartitions(), existingTopic.getReplicas(),
            existingTopic.getKeySchema(), existingTopic.getValueSchema(),
            topicFromStatement.getKeyFeatures(), topicFromStatement.getValueFeatures()));
      } else {
        allTopics.put(topicFromStatement.getName(), topicFromStatement);
      }
    }
  }
  // Get topics from the inputs and outputs fields:
  Streams.concat(inputs.stream(), outputs.stream())
      .map(record -> new Topic(record.getTopicName(), Optional.empty(), Optional.empty()))
      .forEach(topic -> allTopics.putIfAbsent(topic.getName(), topic));
  return allTopics.values();
}
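A sketch of a call site, under the assumption that the surrounding test-case builder already holds the statements, declared topics, and input/output records; KsqlConfig.empty() matches the engine helper above. Note that schemas from explicitly declared topics take precedence over the ones inferred from statements.

// Illustrative call; `statements`, `declaredTopics`, `outputs` and `inputs` are
// placeholders for the test case's own data.
final Collection<Topic> allTopics = TestCaseBuilderUtil.getAllTopics(
    statements,
    declaredTopics,
    outputs,
    inputs,
    TestFunctionRegistry.INSTANCE.get(),
    KsqlConfig.empty());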
Use of io.confluent.ksql.metastore.MutableMetaStore in project ksql by confluentinc.
The class MetaStoreFixture, method getNewMetaStore:
public static MutableMetaStore getNewMetaStore(final FunctionRegistry functionRegistry, final ValueFormat valueFormat) {
  final MutableMetaStore metaStore = new MetaStoreImpl(functionRegistry);
  final KeyFormat keyFormat = KeyFormat.nonWindowed(FormatInfo.of(FormatFactory.KAFKA.name()), SerdeFeatures.of());
  final LogicalSchema test1Schema = LogicalSchema.builder()
      .keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
      .valueColumn(ColumnName.of("COL1"), SqlTypes.STRING).valueColumn(ColumnName.of("COL2"), SqlTypes.STRING)
      .valueColumn(ColumnName.of("COL3"), SqlTypes.DOUBLE).valueColumn(ColumnName.of("COL4"), SqlTypes.array(SqlTypes.DOUBLE))
      .valueColumn(ColumnName.of("COL5"), SqlTypes.map(SqlTypes.STRING, SqlTypes.DOUBLE))
      .headerColumn(ColumnName.of("HEAD"), Optional.empty()).build();
  final KsqlTopic ksqlTopic0 = new KsqlTopic("test0", keyFormat, valueFormat);
  final KsqlStream<?> ksqlStream0 = new KsqlStream<>("sqlexpression", SourceName.of("TEST0"), test1Schema, Optional.empty(), false, ksqlTopic0, false);
  metaStore.putSource(ksqlStream0, false);
  final KsqlTopic ksqlTopic1 = new KsqlTopic("test1", keyFormat, valueFormat);
  final KsqlStream<?> ksqlStream1 = new KsqlStream<>("sqlexpression", SourceName.of("TEST1"), test1Schema, Optional.empty(), false, ksqlTopic1, false);
  metaStore.putSource(ksqlStream1, false);
  final LogicalSchema test2Schema = LogicalSchema.builder()
      .keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
      .valueColumn(ColumnName.of("COL1"), SqlTypes.STRING).valueColumn(ColumnName.of("COL2"), SqlTypes.STRING)
      .valueColumn(ColumnName.of("COL3"), SqlTypes.DOUBLE).valueColumn(ColumnName.of("COL4"), SqlTypes.BOOLEAN)
      .build();
  final KsqlTopic ksqlTopic2 = new KsqlTopic("test2", keyFormat, valueFormat);
  final KsqlTable<String> ksqlTable = new KsqlTable<>("sqlexpression", SourceName.of("TEST2"), test2Schema, Optional.empty(), false, ksqlTopic2, false);
  metaStore.putSource(ksqlTable, false);
  final SqlType addressSchema = SqlTypes.struct()
      .field("NUMBER", SqlTypes.BIGINT).field("STREET", SqlTypes.STRING).field("CITY", SqlTypes.STRING)
      .field("STATE", SqlTypes.STRING).field("ZIPCODE", SqlTypes.BIGINT).build();
  final SqlType categorySchema = SqlTypes.struct()
      .field("ID", SqlTypes.BIGINT).field("NAME", SqlTypes.STRING).build();
  final SqlType itemInfoSchema = SqlTypes.struct()
      .field("ITEMID", SqlTypes.BIGINT).field("NAME", SqlTypes.STRING).field("CATEGORY", categorySchema).build();
  final LogicalSchema ordersSchema = LogicalSchema.builder()
      .keyColumn(ColumnName.of("ORDERTIME"), SqlTypes.BIGINT)
      .valueColumn(ColumnName.of("ORDERID"), SqlTypes.BIGINT).valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING)
      .valueColumn(ColumnName.of("ITEMINFO"), itemInfoSchema).valueColumn(ColumnName.of("ORDERUNITS"), SqlTypes.INTEGER)
      .valueColumn(ColumnName.of("ARRAYCOL"), SqlTypes.array(SqlTypes.DOUBLE)).valueColumn(ColumnName.of("MAPCOL"), SqlTypes.map(SqlTypes.STRING, SqlTypes.DOUBLE))
      .valueColumn(ColumnName.of("ADDRESS"), addressSchema).valueColumn(ColumnName.of("TIMESTAMPCOL"), SqlTypes.TIMESTAMP)
      .valueColumn(ColumnName.of("TIMECOL"), SqlTypes.TIME).valueColumn(ColumnName.of("DATECOL"), SqlTypes.DATE)
      .valueColumn(ColumnName.of("BYTESCOL"), SqlTypes.BYTES).build();
  final KsqlTopic ksqlTopicOrders = new KsqlTopic("orders_topic", keyFormat, valueFormat);
  final KsqlStream<?> ksqlStreamOrders = new KsqlStream<>("sqlexpression", SourceName.of("ORDERS"), ordersSchema, Optional.empty(), false, ksqlTopicOrders, false);
  metaStore.putSource(ksqlStreamOrders, false);
  final LogicalSchema testTable3 = LogicalSchema.builder()
      .keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT)
      .valueColumn(ColumnName.of("COL1"), SqlTypes.STRING).valueColumn(ColumnName.of("COL2"), SqlTypes.STRING)
      .valueColumn(ColumnName.of("COL3"), SqlTypes.DOUBLE).valueColumn(ColumnName.of("COL4"), SqlTypes.BOOLEAN)
      .build();
  final KsqlTopic ksqlTopic3 = new KsqlTopic("test3", keyFormat, valueFormat);
  final KsqlTable<String> ksqlTable3 = new KsqlTable<>("sqlexpression", SourceName.of("TEST3"), testTable3, Optional.empty(), false, ksqlTopic3, false);
  metaStore.putSource(ksqlTable3, false);
  final SqlType nestedOrdersSchema = SqlTypes.struct()
      .field("ORDERTIME", SqlTypes.BIGINT).field("ORDERID", SqlTypes.BIGINT).field("ITEMID", SqlTypes.STRING)
      .field("ITEMINFO", itemInfoSchema).field("ORDERUNITS", SqlTypes.INTEGER)
      .field("ARRAYCOL", SqlTypes.array(SqlTypes.DOUBLE)).field("MAPCOL", SqlTypes.map(SqlTypes.STRING, SqlTypes.DOUBLE))
      .field("ADDRESS", addressSchema).build();
  final LogicalSchema nestedArrayStructMapSchema = LogicalSchema.builder()
      .keyColumn(ColumnName.of("K"), SqlTypes.STRING)
      .valueColumn(ColumnName.of("ARRAYCOL"), SqlTypes.array(itemInfoSchema)).valueColumn(ColumnName.of("MAPCOL"), SqlTypes.map(SqlTypes.STRING, itemInfoSchema))
      .valueColumn(ColumnName.of("NESTED_ORDER_COL"), nestedOrdersSchema).valueColumn(ColumnName.of("ITEM"), itemInfoSchema)
      .build();
  final KsqlTopic nestedArrayStructMapTopic = new KsqlTopic("NestedArrayStructMap_topic", keyFormat, valueFormat);
  final KsqlStream<?> nestedArrayStructMapOrders = new KsqlStream<>("sqlexpression", SourceName.of("NESTED_STREAM"), nestedArrayStructMapSchema, Optional.empty(), false, nestedArrayStructMapTopic, false);
  metaStore.putSource(nestedArrayStructMapOrders, false);
  final KsqlTopic ksqlTopic4 = new KsqlTopic("test4", keyFormat, valueFormat);
  final KsqlStream<?> ksqlStream4 = new KsqlStream<>("sqlexpression4", SourceName.of("TEST4"), test1Schema, Optional.empty(), false, ksqlTopic4, false);
  metaStore.putSource(ksqlStream4, false);
  final LogicalSchema sensorReadingsSchema = LogicalSchema.builder()
      .keyColumn(ColumnName.of("ID"), SqlTypes.BIGINT)
      .valueColumn(ColumnName.of("SENSOR_NAME"), SqlTypes.STRING).valueColumn(ColumnName.of("ARR1"), SqlTypes.array(SqlTypes.BIGINT))
      .valueColumn(ColumnName.of("ARR2"), SqlTypes.array(SqlTypes.STRING)).build();
  final KsqlTopic ksqlTopicSensorReadings = new KsqlTopic("sensor_readings_topic", keyFormat, valueFormat);
  final KsqlStream<?> ksqlStreamSensorReadings = new KsqlStream<>("sqlexpression", SourceName.of("SENSOR_READINGS"), sensorReadingsSchema, Optional.empty(), false, ksqlTopicSensorReadings, false);
  metaStore.putSource(ksqlStreamSensorReadings, false);
  final LogicalSchema testTable5 = LogicalSchema.builder()
      .keyColumn(ColumnName.of("A"), SqlTypes.BOOLEAN)
      .valueColumn(ColumnName.of("B"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("C"), SqlTypes.BOOLEAN)
      .valueColumn(ColumnName.of("D"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("E"), SqlTypes.BOOLEAN)
      .valueColumn(ColumnName.of("F"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("G"), SqlTypes.BOOLEAN)
      .build();
  final KsqlTopic ksqlTopic5 = new KsqlTopic("test5", keyFormat, valueFormat);
  final KsqlTable<String> ksqlTable5 = new KsqlTable<>("sqlexpression", SourceName.of("TEST5"), testTable5, Optional.empty(), false, ksqlTopic5, false);
  metaStore.putSource(ksqlTable5, false);
  return metaStore;
}
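A sketch of typical consumption of this fixture; the JSON value format is just an example, and getSource is part of the MetaStore interface that MutableMetaStore extends.

// Illustrative usage; the function registry and value format are example choices.
final MutableMetaStore metaStore = MetaStoreFixture.getNewMetaStore(
    TestFunctionRegistry.INSTANCE.get(),
    ValueFormat.of(FormatInfo.of(FormatFactory.JSON.name()), SerdeFeatures.of()));
// Look up one of the pre-registered sources; ORDERS was registered above.
final DataSource orders = metaStore.getSource(SourceName.of("ORDERS"));
assertThat(orders, is(notNullValue()));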
Use of io.confluent.ksql.metastore.MutableMetaStore in project ksql by confluentinc.
The class AnalyzerFunctionalTest, method shouldNotInheritNamespaceExplicitlySetUpstreamForAvro:
@Test
public void shouldNotInheritNamespaceExplicitlySetUpstreamForAvro() {
  final String simpleQuery = "create stream s1 as select * from S0;";
  final MutableMetaStore newAvroMetaStore = avroMetaStore.copy();
  final KsqlTopic ksqlTopic = new KsqlTopic(
      "s0",
      KeyFormat.nonWindowed(FormatInfo.of(FormatFactory.KAFKA.name()), SerdeFeatures.of()),
      ValueFormat.of(
          FormatInfo.of(FormatFactory.AVRO.name(), ImmutableMap.of(ConnectProperties.FULL_SCHEMA_NAME, "org.ac.s1")),
          SerdeFeatures.of()));
  final LogicalSchema schema = LogicalSchema.builder()
      .keyColumn(SystemColumns.ROWKEY_NAME, SqlTypes.STRING)
      .valueColumn(ColumnName.of("FIELD1"), SqlTypes.BIGINT)
      .build();
  final KsqlStream<?> ksqlStream = new KsqlStream<>(
      "create stream s0 with(KAFKA_TOPIC='s0', VALUE_AVRO_SCHEMA_FULL_NAME='org.ac.s1', VALUE_FORMAT='avro');",
      SourceName.of("S0"), schema, Optional.empty(), false, ksqlTopic, false);
  newAvroMetaStore.putSource(ksqlStream, false);
  final List<Statement> statements = parse(simpleQuery, newAvroMetaStore);
  final CreateStreamAsSelect createStreamAsSelect = (CreateStreamAsSelect) statements.get(0);
  final Query query = createStreamAsSelect.getQuery();
  final Analyzer analyzer = new Analyzer(newAvroMetaStore, "", ROWPARTITION_ROWOFFSET_ENABLED, PULL_LIMIT_CLAUSE_ENABLED);
  final Analysis analysis = analyzer.analyze(query, Optional.of(createStreamAsSelect.getSink()));
  assertThat(analysis.getInto(), is(not(Optional.empty())));
  // The sink's value format must not inherit the full schema name explicitly set on S0.
  assertThat(analysis.getInto().get().getNewTopic().get().getValueFormat(), is(FormatInfo.of(FormatFactory.AVRO.name())));
}
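The test leans on MetaStore.copy() returning an independent mutable copy, so registering S0 there cannot leak into the shared avroMetaStore. A sketch of that contract, assuming getSource returns null for an unknown source (as MetaStoreImpl does):

final MutableMetaStore copied = avroMetaStore.copy();
copied.putSource(ksqlStream, false);
// The copy sees the new source...
assertThat(copied.getSource(SourceName.of("S0")), is(notNullValue()));
// ...while the original metastore remains untouched.
assertThat(avroMetaStore.getSource(SourceName.of("S0")), is(nullValue()));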