use of io.confluent.ksql.function.InternalFunctionRegistry in project ksql by confluentinc.
the class FlatMapNodeTest method setUp.
@Before
public void setUp() {
when(source.getSchema()).thenReturn(SOURCE_SCHEMA);
when(source.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM);
when(analysis.getTableFunctions()).thenReturn(ImmutableList.of(A_TABLE_FUNCTION));
when(analysis.getSelectItems()).thenReturn(ImmutableList.of(
    new AllColumns(Optional.empty()),
    new SingleColumn(new UnqualifiedColumnReferenceExp(COL0), Optional.empty()),
    new SingleColumn(A_TABLE_FUNCTION, Optional.empty())));
final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry();
new UdtfLoader(functionRegistry, Optional.empty(), SqlTypeParser.create(TypeRegistry.EMPTY), true)
    .loadUdtfFromClass(Explode.class, "load path");
flatMapNode = new FlatMapNode(PLAN_ID, source, functionRegistry, analysis);
}
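A quick way to confirm that the loader wired the UDTF into the registry is to query it back; a minimal sketch, assuming the FunctionRegistry#isTableFunction(FunctionName) accessor of the current interface (the assertion is illustrative, not part of the original test):
// Illustrative sanity check: EXPLODE should be registered after loadUdtfFromClass.
assertThat(functionRegistry.isTableFunction(FunctionName.of("EXPLODE")), is(true));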
use of io.confluent.ksql.function.InternalFunctionRegistry in project ksql by confluentinc.
the class JoinNodeTest method buildJoinNode.
private void buildJoinNode(final String queryString) {
final MetaStore metaStore = MetaStoreFixture.getNewMetaStore(new InternalFunctionRegistry());
final KsqlBareOutputNode planNode =
    (KsqlBareOutputNode) AnalysisTestUtil.buildLogicalPlan(ksqlConfig, queryString, metaStore);
joinNode = (JoinNode) ((ProjectNode) planNode.getSource()).getSource();
}
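The helper is then driven by the statement under test; a minimal sketch with a hypothetical join query (the stream and column names are placeholders, not taken from the original test):
// Hypothetical invocation: builds the logical plan and extracts the JoinNode for assertions.
buildJoinNode("SELECT t1.col1, t2.col1 FROM test1 t1 JOIN test2 t2"
    + " WITHIN 1 HOUR ON t1.col0 = t2.col0 EMIT CHANGES;");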
use of io.confluent.ksql.function.InternalFunctionRegistry in project ksql by confluentinc.
the class EngineExecutor method sourceTablePlan.
@SuppressFBWarnings(value = "NP_NULL_PARAM_DEREF_NONVIRTUAL")
private KsqlPlan sourceTablePlan(final ConfiguredStatement<?> statement) {
final CreateTable createTable = (CreateTable) statement.getStatement();
final CreateTableCommand ddlCommand = (CreateTableCommand) engineContext.createDdlCommand(
    statement.getStatementText(), (ExecutableDdlStatement) statement.getStatement(), config);
final Relation from = new AliasedRelation(new Table(createTable.getName()), createTable.getName());
// Only VALUE or HEADER columns must be selected from the source table. When running a
// pull query, the keys are added if selecting all columns.
final Select select = new Select(createTable.getElements().stream()
    .filter(column -> !column.getConstraints().isKey() && !column.getConstraints().isPrimaryKey())
    .map(column -> new SingleColumn(
        new UnqualifiedColumnReferenceExp(column.getName()), Optional.of(column.getName())))
    .collect(Collectors.toList()));
// The source table needs to keep emitting changes so that every new record is materialized
// for pull query availability.
final RefinementInfo refinementInfo = RefinementInfo.of(OutputRefinement.CHANGES);
// This is a plan for a `select * from <source-table> emit changes` statement,
// without a sink topic to write the results. The query is just made to materialize the
// source table.
final Query query = new Query(
    Optional.empty(),
    select,
    from,
    Optional.empty(),
    Optional.empty(),
    Optional.empty(),
    Optional.empty(),
    Optional.empty(),
    Optional.of(refinementInfo),
    false,
    OptionalInt.empty());
// The source table does not exist in the current metastore, so a temporary metastore that
// contains only the source table is created here. This metastore is used later to create
// the ExecutorPlans.
final MutableMetaStore tempMetastore = new MetaStoreImpl(new InternalFunctionRegistry());
final Formats formats = ddlCommand.getFormats();
tempMetastore.putSource(
    new KsqlTable<>(
        statement.getStatementText(),
        createTable.getName(),
        ddlCommand.getSchema(),
        Optional.empty(),
        false,
        new KsqlTopic(
            ddlCommand.getTopicName(),
            KeyFormat.of(formats.getKeyFormat(), formats.getKeyFeatures(), Optional.empty()),
            ValueFormat.of(formats.getValueFormat(), formats.getValueFeatures())),
        true),
    false);
final ExecutorPlans plans =
    planQuery(statement, query, Optional.empty(), Optional.empty(), tempMetastore);
final KsqlBareOutputNode outputNode = (KsqlBareOutputNode) plans.logicalPlan.getNode().get();
final QueryPlan queryPlan = new QueryPlan(
    getSourceNames(outputNode),
    Optional.empty(),
    plans.physicalPlan.getPhysicalPlan(),
    plans.physicalPlan.getQueryId(),
    getApplicationId(plans.physicalPlan.getQueryId(), getSourceNames(outputNode)));
engineContext.createQueryValidator().validateQuery(
    config, plans.physicalPlan, engineContext.getQueryRegistry().getAllLiveQueries());
return KsqlPlan.queryPlanCurrent(statement.getStatementText(), Optional.of(ddlCommand), queryPlan);
}
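As an illustration (the identifiers are placeholders, not from the source): for a statement such as CREATE SOURCE TABLE USERS (ID INT PRIMARY KEY, NAME STRING) WITH (kafka_topic='users', value_format='JSON');, the query assembled above is equivalent to SELECT NAME FROM USERS EMIT CHANGES: the key column is dropped by the constraint filter, and the CHANGES refinement keeps the table continuously materialized for pull queries.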
use of io.confluent.ksql.function.InternalFunctionRegistry in project ksql by confluentinc.
the class KsqlContext method create.
/**
* Create a KSQL context object with the given properties. A KSQL context has its own metastore,
* valid for the life of the object.
*/
public static KsqlContext create(
    final KsqlConfig ksqlConfig,
    final ProcessingLogContext processingLogContext,
    final MetricCollectors metricCollectors) {
Objects.requireNonNull(ksqlConfig, "ksqlConfig cannot be null.");
final ServiceContext serviceContext = ServiceContextFactory.create(ksqlConfig, DisabledKsqlClient::instance);
final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry();
UserFunctionLoader.newInstance(ksqlConfig, functionRegistry, ".", metricCollectors.getMetrics()).load();
final ServiceInfo serviceInfo = ServiceInfo.create(ksqlConfig);
final KsqlEngine engine = new KsqlEngine(
    serviceContext,
    processingLogContext,
    functionRegistry,
    serviceInfo,
    new SequentialQueryIdGenerator(),
    ksqlConfig,
    Collections.emptyList(),
    metricCollectors);
return new KsqlContext(serviceContext, ksqlConfig, engine, Injectors.DEFAULT);
}
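A caller obtains a context roughly like this; a minimal sketch, assuming ProcessingLogContext.create() and a no-arg MetricCollectors constructor as in recent ksqlDB versions (the config value is a placeholder):
// Illustrative only: the bootstrap server address is a placeholder.
final KsqlConfig config = new KsqlConfig(ImmutableMap.of("bootstrap.servers", "localhost:9092"));
final KsqlContext context =
    KsqlContext.create(config, ProcessingLogContext.create(), new MetricCollectors());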
use of io.confluent.ksql.function.InternalFunctionRegistry in project ksql by confluentinc.
the class SchemaRegisterInjectorTest method setUp.
@Before
public void setUp() throws IOException, RestClientException {
metaStore = new MetaStoreImpl(new InternalFunctionRegistry());
config = new KsqlConfig(ImmutableMap.of(KsqlConfig.SCHEMA_REGISTRY_URL_PROPERTY, "foo:8081"));
injector = new SchemaRegisterInjector(executionContext, serviceContext);
when(serviceContext.getSchemaRegistryClient()).thenReturn(schemaRegistryClient);
when(serviceContext.getTopicClient()).thenReturn(topicClient);
when(serviceContext.getConsumerGroupClient()).thenReturn(consumerGroupClient);
when(executionContext.createSandbox(any())).thenReturn(executionSandbox);
when(keyFeatures.enabled(SerdeFeature.UNWRAP_SINGLES)).thenReturn(true);
when(ddlCommand.getSchema()).thenReturn(SCHEMA);
when(ddlCommand.getTopicName()).thenReturn("SINK");
when(ddlCommand.getFormats()).thenReturn(formats);
when(formats.getKeyFormat()).thenReturn(FormatInfo.of(FormatFactory.AVRO.name()));
when(formats.getKeyFeatures()).thenReturn(keyFeatures);
when(formats.getValueFormat()).thenReturn(FormatInfo.of(FormatFactory.AVRO.name()));
when(formats.getValueFeatures()).thenReturn(valFeatures);
when(schemaRegistryClient.getLatestSchemaMetadata(any()))
    .thenThrow(new RestClientException("foo", 404, SchemaRegistryUtil.SUBJECT_NOT_FOUND_ERROR_CODE));
final KsqlTopic sourceTopic = new KsqlTopic(
    "source",
    KeyFormat.nonWindowed(FormatInfo.of(FormatFactory.KAFKA.name()), SerdeFeatures.of()),
    ValueFormat.of(FormatInfo.of(FormatFactory.JSON.name()), valFeatures));
final KsqlStream<?> source = new KsqlStream<>(
    "", SourceName.of("SOURCE"), SCHEMA, Optional.empty(), false, sourceTopic, false);
metaStore.putSource(source, false);
}
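A test built on this setup then pushes a configured statement through the injector so that the AVRO key and value schemas get registered against the mocked SchemaRegistryClient; a minimal sketch (givenStatement is a hypothetical helper that parses and configures the SQL, and the statement text is illustrative):
// Hypothetical usage: only Injector#inject(...) is taken from the API shown above.
final ConfiguredStatement<?> statement = givenStatement(
    "CREATE STREAM SINK WITH (kafka_topic='SINK', value_format='AVRO') AS SELECT * FROM SOURCE;");
injector.inject(statement);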