Use of org.apache.kafka.clients.admin.AdminClient in project ksql by confluentinc: class WindowingIntTest, method shouldAggregateWithNoWindow.
@Test
public void shouldAggregateWithNoWindow() throws Exception {
    testHarness.publishTestData(topicName, dataProvider, now);
    final String streamName = "NOWINDOW_AGGTEST";
    final String queryString = String.format(
        "CREATE TABLE %s AS SELECT %s FROM ORDERS WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
        streamName,
        "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)");
    ksqlContext.sql(queryString);
    Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
    final GenericRow expected =
        new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0));
    final Map<String, GenericRow> results = new HashMap<>();
    TestUtils.waitForCondition(() -> {
        final Map<String, GenericRow> aggregateResults = testHarness.consumeData(
            streamName, resultSchema, 1, new StringDeserializer(), MAX_POLL_PER_ITERATION);
        final GenericRow actual = aggregateResults.get("ITEM_1");
        return expected.equals(actual);
    }, 60000, "didn't receive correct results within timeout");
    AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    Set<String> topicBeforeCleanup = topicClient.listTopicNames();
    assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
        topicBeforeCleanup.size(), equalTo(5));
    QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
    queryMetadata.close();
    Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
    assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size(),
        topicsAfterCleanUp.size(), equalTo(3));
    assertThat(topicClient.getTopicCleanupPolicy(streamName),
        equalTo(KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}
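KafkaTopicClientImpl wraps the AdminClient created above. As a point of reference, here is a minimal sketch of the raw admin calls that listTopicNames() and getTopicCleanupPolicy() plausibly delegate to; the adminClient variable and the "NOWINDOW_AGGTEST" name are carried over from the test, and the rest is an assumption, not KSQL's actual implementation:

import java.util.Collections;
import java.util.Set;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.TopicConfig;

// List every topic name visible to this client (blocks on the returned future).
Set<String> names = adminClient.listTopics().names().get();

// cleanup.policy is a per-topic broker config, read via describeConfigs().
ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "NOWINDOW_AGGTEST");
Config config = adminClient.describeConfigs(Collections.singleton(topic))
        .all().get().get(topic);
String cleanupPolicy = config.get(TopicConfig.CLEANUP_POLICY_CONFIG).value();
// A CREATE TABLE sink is expected to report "compact", matching the assertion above.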
Use of org.apache.kafka.clients.admin.AdminClient in project ksql by confluentinc: class WindowingIntTest, method shouldAggregateSessionWindow.
@Test
public void shouldAggregateSessionWindow() throws Exception {
    testHarness.publishTestData(topicName, dataProvider, now);
    final String streamName = "SESSION_AGGTEST";
    final String queryString = String.format(
        "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s GROUP BY ORDERID;",
        streamName,
        "ORDERID, COUNT(*), SUM(ORDERUNITS)",
        "SESSION (10 SECONDS)");
    ksqlContext.sql(queryString);
    Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
    GenericRow expectedResults =
        new GenericRow(Arrays.asList(null, null, "ORDER_6", 6 /* count */, 420.0 /* sum of units */));
    final Map<String, GenericRow> results = new HashMap<>();
    TestUtils.waitForCondition(() -> {
        final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
            streamName, resultSchema, datasetOneMetaData.size(),
            new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
        updateResults(results, windowedResults);
        final GenericRow actual = results.get("ORDER_6");
        return expectedResults.equals(actual) && results.size() == 6;
    }, 60000, "didn't receive correct results within timeout");
    AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    Set<String> topicBeforeCleanup = topicClient.listTopicNames();
    assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
        topicBeforeCleanup.size(), equalTo(5));
    QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
    queryMetadata.close();
    Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
    assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size(),
        topicsAfterCleanUp.size(), equalTo(3));
    assertThat(topicClient.getTopicCleanupPolicy(streamName),
        equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
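The TimeWindowedDeserializer used above unwraps the windowed keys that Kafka Streams writes for windowed aggregates. A minimal sketch of reading such a topic with a plain consumer, outside the test harness; the broker address, group id, and value deserializer are illustrative assumptions:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.streams.kstream.TimeWindowedDeserializer;
import org.apache.kafka.streams.kstream.Windowed;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
props.put("group.id", "windowed-reader");         // illustrative group id
props.put("auto.offset.reset", "earliest");

TimeWindowedDeserializer<String> keyDeserializer =
        new TimeWindowedDeserializer<>(new StringDeserializer());
try (KafkaConsumer<Windowed<String>, byte[]> consumer =
        new KafkaConsumer<>(props, keyDeserializer, new ByteArrayDeserializer())) {
    consumer.subscribe(Collections.singletonList("SESSION_AGGTEST"));
    for (ConsumerRecord<Windowed<String>, byte[]> record : consumer.poll(Duration.ofSeconds(1))) {
        // Windowed<K> exposes the original key plus the window; with the
        // default constructor the window size is unknown, so only the
        // start timestamp is meaningful here.
        System.out.printf("%s @ %d%n", record.key().key(), record.key().window().start());
    }
}

Kafka Streams also ships a SessionWindowedDeserializer for session-windowed keys, whose binary format carries the real window end rather than deriving it from a fixed window size.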
Use of org.apache.kafka.clients.admin.AdminClient in project ksql by confluentinc: class WindowingIntTest, method shouldAggregateHoppingWindow.
@Test
public void shouldAggregateHoppingWindow() throws Exception {
    testHarness.publishTestData(topicName, dataProvider, now);
    final String streamName = "HOPPING_AGGTEST";
    final String queryString = String.format(
        "CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;",
        streamName,
        "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)",
        "HOPPING ( SIZE 10 SECONDS, ADVANCE BY 5 SECONDS)");
    ksqlContext.sql(queryString);
    Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
    final GenericRow expected =
        new GenericRow(Arrays.asList(null, null, "ITEM_1", 2 /* 2 x items */, 20.0));
    final Map<String, GenericRow> results = new HashMap<>();
    TestUtils.waitForCondition(() -> {
        final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(
            streamName, resultSchema, 1,
            new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
        updateResults(results, windowedResults);
        final GenericRow actual = results.get("ITEM_1");
        return expected.equals(actual);
    }, 60000, "didn't receive correct results within timeout");
    AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    Set<String> topicBeforeCleanup = topicClient.listTopicNames();
    assertThat("Expected to have 5 topics instead have : " + topicBeforeCleanup.size(),
        topicBeforeCleanup.size(), equalTo(5));
    QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
    queryMetadata.close();
    Set<String> topicsAfterCleanUp = topicClient.listTopicNames();
    assertThat("Expected to see 3 topics after clean up but seeing " + topicsAfterCleanUp.size(),
        topicsAfterCleanUp.size(), equalTo(3));
    assertThat(topicClient.getTopicCleanupPolicy(streamName),
        equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
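Closing the query above is what triggers KSQL's cleanup of its internal topics. The same effect can be produced directly through the admin API; a sketch, using placeholder names rather than the actual internal topic names KSQL generates:

import java.util.Arrays;

// Placeholder names; real KSQL repartition/changelog topics follow its own naming scheme.
adminClient.deleteTopics(Arrays.asList(
        "HOPPING_AGGTEST-placeholder-repartition",
        "HOPPING_AGGTEST-placeholder-changelog"))
    .all()
    .get(); // block until the controller acknowledges the deletions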
Use of org.apache.kafka.clients.admin.AdminClient in project ksql by confluentinc: class KsqlContextTest, method shouldRunSimpleStatements.
@Test
public void shouldRunSimpleStatements() throws Exception {
    AdminClient adminClient = mock(AdminClient.class);
    KafkaTopicClient kafkaTopicClient = mock(KafkaTopicClientImpl.class);
    KsqlEngine ksqlEngine = mock(KsqlEngine.class);
    Map<QueryId, PersistentQueryMetadata> liveQueryMap = new HashMap<>();
    KsqlContext ksqlContext = new KsqlContext(adminClient, kafkaTopicClient, ksqlEngine);
    expect(ksqlEngine.buildMultipleQueries(statement1, Collections.emptyMap()))
        .andReturn(Collections.emptyList());
    expect(ksqlEngine.buildMultipleQueries(statement2, Collections.emptyMap()))
        .andReturn(getQueryMetadata(new QueryId("CSAS_BIGORDERS"), DataSource.DataSourceType.KSTREAM));
    expect(ksqlEngine.getPersistentQueries()).andReturn(liveQueryMap);
    replay(ksqlEngine);
    ksqlContext.sql(statement1);
    ksqlContext.sql(statement2);
    verify(ksqlEngine);
}
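The test above never touches a broker: the AdminClient is an EasyMock mock and only the KsqlEngine expectations are verified. For contrast, a minimal sketch of a real AdminClient interaction, here creating a topic; the topic name, partition count, and replication factor are illustrative:

import java.util.Collections;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

// Single partition, replication factor 1: only suitable for a local test broker.
NewTopic newTopic = new NewTopic("ksql_example_topic", 1, (short) 1);
adminClient.createTopics(Collections.singleton(newTopic)).all().get();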
Use of org.apache.kafka.clients.admin.AdminClient in project ranger by apache: class ServiceKafkaClient, method getTopicList.
private List<String> getTopicList(List<String> ignoreTopicList) throws Exception {
    List<String> ret = new ArrayList<>();
    int sessionTimeout = 5000;
    int connectionTimeout = 10000;
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, configs.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG));
    props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, configs.get(AdminClientConfig.SECURITY_PROTOCOL_CONFIG));
    props.put(KEY_SASL_MECHANISM, configs.get(KEY_SASL_MECHANISM));
    props.put(KEY_SASL_JAAS_CONFIG, getJAASConfig(configs));
    props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, getIntProperty(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, sessionTimeout));
    props.put(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, getIntProperty(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, connectionTimeout));
    // AdminClient is AutoCloseable, so try-with-resources replaces the original
    // try/finally plus a no-op catch-and-rethrow; behavior is unchanged.
    try (AdminClient adminClient = KafkaAdminClient.create(props)) {
        ListTopicsResult listTopicsResult = adminClient.listTopics();
        if (listTopicsResult != null) {
            Collection<TopicListing> topicListings = listTopicsResult.listings().get();
            for (TopicListing topicListing : topicListings) {
                String topicName = topicListing.name();
                if (ignoreTopicList == null || !ignoreTopicList.contains(topicName)) {
                    ret.add(topicName);
                }
            }
        }
    }
    return ret;
}
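One caveat in the method above: REQUEST_TIMEOUT_MS_CONFIG bounds individual broker requests, but listings().get() itself can block indefinitely if the cluster never answers. A hedged variant that bounds the wait as well; the 10-second figure is an assumption:

import java.util.Collection;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.TopicListing;

// KafkaFuture.get(timeout, unit) throws TimeoutException instead of hanging.
Collection<TopicListing> topicListings =
        adminClient.listTopics().listings().get(10, TimeUnit.SECONDS);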