Use of io.confluent.ksql.function.FunctionRegistry in project ksql by confluentinc.
The class LogicalPlannerTest, method init().
@Before
public void init() {
  metaStore = MetaStoreFixture.getNewMetaStore();
  functionRegistry = new FunctionRegistry();
}
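The fixture assigns to two fields of the test class that the snippet does not show. A minimal sketch of the declarations init() assumes, with the types inferred from the assignments:

private MetaStore metaStore;               // rebuilt from MetaStoreFixture before each test
private FunctionRegistry functionRegistry; // fresh registry so tests don't share function state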
Use of io.confluent.ksql.function.FunctionRegistry in project ksql by confluentinc.
The class KsqlStructuredDataOutputNodeTest, method shouldCreateSinkWithCorrectCleanupPolicyNonWindowedTable().
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyNonWindowedTable() {
  KafkaTopicClient topicClientForNonWindowTable = EasyMock.mock(KafkaTopicClient.class);
  KsqlStructuredDataOutputNode outputNode = getKsqlStructuredDataOutputNode(false);
  StreamsBuilder streamsBuilder = new StreamsBuilder();
  // A non-windowed table sink should be created as a compacted topic.
  Map<String, String> topicConfig =
      ImmutableMap.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
  topicClientForNonWindowTable.createTopic("output", 4, (short) 3, topicConfig);
  EasyMock.replay(topicClientForNonWindowTable);
  SchemaKStream schemaKStream = outputNode.buildStream(
      streamsBuilder,
      ksqlConfig,
      topicClientForNonWindowTable,
      new FunctionRegistry(),
      new HashMap<>(),
      new MockSchemaRegistryClient());
  assertThat(schemaKStream, instanceOf(SchemaKTable.class));
  // Verify against the mock itself; a no-arg verify() checks nothing.
  EasyMock.verify(topicClientForNonWindowTable);
}
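The test follows EasyMock's record-replay-verify lifecycle: calls made on a mock before replay() are recorded as expectations, and verify(mock) fails the test if any recorded call never happened. A minimal sketch of the pattern in isolation, using the same createTopic parameters as the test above:

// Record phase: this call becomes an expectation, not a real call.
KafkaTopicClient client = EasyMock.mock(KafkaTopicClient.class);
Map<String, String> topicConfig =
    ImmutableMap.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
client.createTopic("output", 4, (short) 3, topicConfig);

// Replay phase: the mock now checks real calls against the recording.
EasyMock.replay(client);

// ... exercise the code under test, which should call createTopic ...

// Verify phase: fails if the expected createTopic call was never made.
EasyMock.verify(client);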
Use of io.confluent.ksql.function.FunctionRegistry in project ksql by confluentinc.
The class KsqlStructuredDataOutputNodeTest, method shouldCreateSinkWithCorrectCleanupPolicyWindowedTable().
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyWindowedTable() {
  KafkaTopicClient topicClientForWindowTable = EasyMock.mock(KafkaTopicClient.class);
  KsqlStructuredDataOutputNode outputNode = getKsqlStructuredDataOutputNode(true);
  StreamsBuilder streamsBuilder = new StreamsBuilder();
  // A windowed table sink passes no topic config, so the broker's default
  // cleanup.policy (delete) applies rather than compaction.
  topicClientForWindowTable.createTopic("output", 4, (short) 3, Collections.emptyMap());
  EasyMock.replay(topicClientForWindowTable);
  SchemaKStream schemaKStream = outputNode.buildStream(
      streamsBuilder,
      ksqlConfig,
      topicClientForWindowTable,
      new FunctionRegistry(),
      new HashMap<>(),
      new MockSchemaRegistryClient());
  assertThat(schemaKStream, instanceOf(SchemaKTable.class));
  // Verify against the mock itself; a no-arg verify() checks nothing.
  EasyMock.verify(topicClientForWindowTable);
}
Use of io.confluent.ksql.function.FunctionRegistry in project ksql by confluentinc.
The class PhysicalPlanBuilderTest, method buildPhysicalPlanBuilder().
private PhysicalPlanBuilder buildPhysicalPlanBuilder(Map<String, Object> overrideProperties) {
  final StreamsBuilder streamsBuilder = new StreamsBuilder();
  final FunctionRegistry functionRegistry = new FunctionRegistry();
  Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
  configMap.put("application.id", "KSQL");
  configMap.put("commit.interval.ms", 0);
  configMap.put("cache.max.bytes.buffering", 0);
  configMap.put("auto.offset.reset", "earliest");
  return new PhysicalPlanBuilder(
      streamsBuilder,
      new KsqlConfig(configMap),
      new FakeKafkaTopicClient(),
      functionRegistry,
      overrideProperties,
      false,
      metaStore,
      new MockSchemaRegistryClient(),
      testKafkaStreamsBuilder);
}
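The raw string keys correspond to standard Kafka Streams and consumer config constants. An equivalent sketch of the same map written with the constants, assuming the usual org.apache.kafka imports:

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.StreamsConfig;

Map<String, Object> configMap = new HashMap<>();
configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
configMap.put(StreamsConfig.APPLICATION_ID_CONFIG, "KSQL");                 // "application.id"
configMap.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 0);                  // "commit.interval.ms"
configMap.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);           // "cache.max.bytes.buffering"
configMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");         // "auto.offset.reset"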
Use of io.confluent.ksql.function.FunctionRegistry in project ksql by confluentinc.
The class SchemaKStream, method groupBy().
public SchemaKGroupedStream groupBy(
    final Serde<String> keySerde,
    final Serde<GenericRow> valSerde,
    final List<Expression> groupByExpressions) {
  boolean rekey = rekeyRequired(groupByExpressions);
  if (!rekey) {
    KGroupedStream kgroupedStream = kstream.groupByKey(Serialized.with(keySerde, valSerde));
    return new SchemaKGroupedStream(
        schema, kgroupedStream, keyField, Collections.singletonList(this),
        functionRegistry, schemaRegistryClient);
  }

  // Collect the column indexes, and build the new key name as
  // <column1>|+|<column2>|+|...
  StringBuilder aggregateKeyName = new StringBuilder();
  List<Integer> newKeyIndexes = new ArrayList<>();
  boolean addSeparator = false;
  for (Expression groupByExpr : groupByExpressions) {
    if (addSeparator) {
      aggregateKeyName.append("|+|");
    } else {
      addSeparator = true;
    }
    aggregateKeyName.append(groupByExpr.toString());
    newKeyIndexes.add(SchemaUtil.getIndexInSchema(groupByExpr.toString(), getSchema()));
  }

  // Re-key the stream: drop null rows, then build each record's composite key
  // from the grouped columns' values, joined with the same "|+|" separator.
  KGroupedStream kgroupedStream = kstream
      .filter((key, value) -> value != null)
      .groupBy((key, value) -> {
        StringBuilder newKey = new StringBuilder();
        boolean addSeparator1 = false;
        for (int index : newKeyIndexes) {
          if (addSeparator1) {
            newKey.append("|+|");
          } else {
            addSeparator1 = true;
          }
          newKey.append(String.valueOf(value.getColumns().get(index)));
        }
        return newKey.toString();
      }, Serialized.with(keySerde, valSerde));

  // TODO: if the key is a prefix of the grouping columns then we can
  // use the repartition reflection hack to tell streams not to
  // repartition.
  Field newKeyField = new Field(aggregateKeyName.toString(), -1, Schema.STRING_SCHEMA);
  return new SchemaKGroupedStream(
      schema, kgroupedStream, newKeyField, Collections.singletonList(this),
      functionRegistry, schemaRegistryClient);
}
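To make the re-keying concrete, here is a self-contained sketch of just the composite-key construction. The GenericRow column list is simulated with a plain List<Object>, and the "|+|" separator matches the method above; the row values and grouping indexes are invented for illustration:

import java.util.Arrays;
import java.util.List;

public class CompositeKeyExample {
  public static void main(String[] args) {
    // Hypothetical row: columns ["USER_1", 42, "EMEA"], grouping on columns 0 and 2.
    List<Object> columns = Arrays.asList("USER_1", 42, "EMEA");
    List<Integer> newKeyIndexes = Arrays.asList(0, 2);

    StringBuilder newKey = new StringBuilder();
    boolean addSeparator = false;
    for (int index : newKeyIndexes) {
      if (addSeparator) {
        newKey.append("|+|");
      } else {
        addSeparator = true;
      }
      newKey.append(String.valueOf(columns.get(index)));
    }
    // Prints: USER_1|+|EMEA
    System.out.println(newKey);
  }
}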