Usage example of io.confluent.ksql.util.KsqlConfig in the ksql project by confluentinc:
the IntegrationTestHarness class, start method.
/**
 * Boots a single-node embedded Kafka cluster, then builds the {@link KsqlConfig}
 * plus the admin and topic clients the integration tests use against it.
 *
 * @throws Exception if the embedded cluster fails to start
 */
public void start() throws Exception {
  embeddedKafkaCluster = new EmbeddedSingleNodeKafkaCluster();
  embeddedKafkaCluster.start();

  final Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafkaCluster.bootstrapServers());
  // Use the Kafka config constants instead of raw strings so a typo fails at compile time.
  configMap.put(StreamsConfig.APPLICATION_ID_CONFIG, "KSQL");
  // Commit immediately and disable record caching for deterministic test output.
  configMap.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 0);
  configMap.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
  configMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  configMap.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());

  this.ksqlConfig = new KsqlConfig(configMap);
  this.adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  this.topicClient = new KafkaTopicClientImpl(adminClient);
}
Usage example of io.confluent.ksql.util.KsqlConfig in the ksql project by confluentinc:
the JoinIntTest class, before method.
/**
 * Starts the integration test harness, builds a KsqlContext tuned for join
 * consistency, and seeds the item/order topics (JSON and Avro variants).
 * The ordering matters: topics must exist before data is published, and the
 * harness must be started before its config can be copied.
 */
@Before
public void before() throws Exception {
testHarness = new IntegrationTestHarness();
testHarness.start();
// Start from the harness's stream config, then override a couple of settings.
Map<String, Object> ksqlStreamConfigProps = new HashMap<>();
ksqlStreamConfigProps.putAll(testHarness.ksqlConfig.getKsqlStreamConfigProps());
// turn caching off to improve join consistency
ksqlStreamConfigProps.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
ksqlStreamConfigProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
ksqlContext = KsqlContext.create(new KsqlConfig(ksqlStreamConfigProps), testHarness.schemaRegistryClient);
/**
 * Set up the test data: items are published 500 ms before the orders so the
 * table side of the join is populated first.
 */
testHarness.createTopic(itemTableTopicJson);
testHarness.createTopic(itemTableTopicAvro);
itemDataProvider = new ItemDataProvider();
testHarness.publishTestData(itemTableTopicJson, itemDataProvider, now - 500);
testHarness.publishTestData(itemTableTopicAvro, itemDataProvider, now - 500, DataSource.DataSourceSerDe.AVRO);
testHarness.createTopic(orderStreamTopicJson);
testHarness.createTopic(orderStreamTopicAvro);
orderDataProvider = new OrderDataProvider();
testHarness.publishTestData(orderStreamTopicJson, orderDataProvider, now);
testHarness.publishTestData(orderStreamTopicAvro, orderDataProvider, now, DataSource.DataSourceSerDe.AVRO);
createStreams();
}
Usage example of io.confluent.ksql.util.KsqlConfig in the ksql project by confluentinc:
the JsonFormatTest class, before method.
/**
 * Builds a KsqlConfig against the shared CLUSTER, wires up the admin/topic
 * clients and the KSQL engine, then creates and populates the initial topics
 * and streams used by the JSON format tests.
 *
 * @throws Exception if topic creation or initial data production fails
 */
@Before
public void before() throws Exception {
  final Map<String, Object> configMap = new HashMap<>();
  configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
  // Use the Kafka config constants instead of raw strings so a typo fails at compile time.
  configMap.put(StreamsConfig.APPLICATION_ID_CONFIG, "KSQL");
  // Commit immediately and disable record caching for deterministic test output.
  configMap.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 0);
  configMap.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
  configMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

  KsqlConfig ksqlConfig = new KsqlConfig(configMap);
  adminClient = AdminClient.create(ksqlConfig.getKsqlAdminClientConfigProps());
  topicClient = new KafkaTopicClientImpl(adminClient);
  ksqlEngine = new KsqlEngine(ksqlConfig, topicClient);
  metaStore = ksqlEngine.getMetaStore();

  createInitTopics();
  produceInitData();
  execInitCreateStreamQueries();
}
Usage example of io.confluent.ksql.util.KsqlConfig in the ksql project by confluentinc:
the SecureIntegrationTest class, before method.
/**
 * Resets the secure cluster's ACLs, picks a unique output topic name for this
 * test run, recreates the topic client using the super user's credentials,
 * and seeds the input data.
 */
@Before
public void before() throws Exception {
  SECURE_CLUSTER.clearAcls();
  outputTopic = "TEST_" + COUNTER.incrementAndGet();

  // Build the topic client step by step rather than in one nested expression.
  final KsqlConfig superUserConfig = new KsqlConfig(getKsqlConfig(SUPER_USER));
  final AdminClient admin = AdminClient.create(superUserConfig.getKsqlAdminClientConfigProps());
  topicClient = new KafkaTopicClientImpl(admin);

  produceInitData();
}
Usage example of io.confluent.ksql.util.KsqlConfig in the ksql project by confluentinc:
the JoinNodeTest class, shouldBuildTableNodeWithCorrectAutoCommitOffsetPolicy method.
/**
 * Verifies that when a join builds its table side, the consumer properties
 * passed down force auto.offset.reset to EARLIEST. The right-hand table is
 * replaced by a local stub whose buildStream throws unless that policy is set.
 */
@Test
public void shouldBuildTableNodeWithCorrectAutoCommitOffsetPolicy() {
  setupTopicClientExpectations(1, 1);
  buildJoin();

  final KsqlConfig mockKsqlConfig = mock(KsqlConfig.class);
  final KafkaTopicClient mockTopicClient = mock(KafkaTopicClient.class);
  final FunctionRegistry mockFunctionRegistry = mock(FunctionRegistry.class);

  // Stand-in for the right-hand table: only "builds" when the offset policy is EARLIEST.
  class RightTable extends PlanNode {
    private final Schema tableSchema;

    public RightTable(final PlanNodeId id, Schema schema) {
      super(id);
      this.tableSchema = schema;
    }

    @Override
    public Schema getSchema() {
      return tableSchema;
    }

    @Override
    public Field getKeyField() {
      return null;
    }

    @Override
    public List<PlanNode> getSources() {
      return null;
    }

    @Override
    public SchemaKStream buildStream(StreamsBuilder builder, KsqlConfig ksqlConfig, KafkaTopicClient kafkaTopicClient, FunctionRegistry functionRegistry, Map<String, Object> props, SchemaRegistryClient schemaRegistryClient) {
      // Guard clause: fail fast unless the expected reset policy was supplied.
      if (!(props.containsKey(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG) && props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toString().equalsIgnoreCase("EARLIEST"))) {
        throw new KsqlException("auto.offset.reset should be set to EARLIEST.");
      }
      return mock(SchemaKTable.class);
    }

    @Override
    protected int getPartitions(KafkaTopicClient kafkaTopicClient) {
      return 1;
    }
  }

  final RightTable rightTable = new RightTable(new PlanNodeId("1"), joinNode.getRight().getSchema());
  final JoinNode testJoinNode = new JoinNode(joinNode.getId(), joinNode.getType(), joinNode.getLeft(), rightTable, joinNode.getLeftKeyFieldName(), joinNode.getRightKeyFieldName(), joinNode.getLeftAlias(), joinNode.getRightAlias());
  // Throws from the stub (failing the test) if the policy is not EARLIEST.
  testJoinNode.tableForJoin(builder, mockKsqlConfig, mockTopicClient, mockFunctionRegistry, new HashMap<>(), new MockSchemaRegistryClient());
}
Aggregations