Usage examples of io.confluent.ksql.util.KafkaTopicClient in project ksql by confluentinc.

Example 1 with KafkaTopicClient

From class WindowingIntTest, method shouldAggregateWithNoWindow:

@Test
public void shouldAggregateWithNoWindow() throws Exception {
    testHarness.publishTestData(topicName, dataProvider, now);
    final String streamName = "NOWINDOW_AGGTEST";
    final String queryString = String.format("CREATE TABLE %s AS SELECT %s FROM ORDERS WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;", streamName, "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)");
    ksqlContext.sql(queryString);
    Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
    final GenericRow expected = new GenericRow(Arrays.asList(null, null, "ITEM_1", 2, /* 2 x items */ 20.0));
    TestUtils.waitForCondition(() -> {
        final Map<String, GenericRow> aggregateResults = testHarness.consumeData(streamName, resultSchema, 1, new StringDeserializer(), MAX_POLL_PER_ITERATION);
        final GenericRow actual = aggregateResults.get("ITEM_1");
        return expected.equals(actual);
    }, 60000, "didn't receive correct results within timeout");
    AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    Set<String> topicsBeforeCleanup = topicClient.listTopicNames();
    assertThat("Expected 5 topics but found " + topicsBeforeCleanup.size(), topicsBeforeCleanup.size(), equalTo(5));
    // Closing the query should clean up the topics it created internally.
    QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
    queryMetadata.close();
    Set<String> topicsAfterCleanup = topicClient.listTopicNames();
    assertThat("Expected 3 topics after cleanup but found " + topicsAfterCleanup.size(), topicsAfterCleanup.size(), equalTo(3));
    assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}
Imports used: io.confluent.common.utils.IntegrationTest, io.confluent.ksql.GenericRow, io.confluent.ksql.util.KafkaTopicClient, io.confluent.ksql.util.KafkaTopicClientImpl, io.confluent.ksql.util.QueryMetadata, java.util.HashMap, org.apache.kafka.clients.admin.AdminClient, org.apache.kafka.common.serialization.StringDeserializer, org.apache.kafka.connect.data.Schema, org.junit.Test
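
Note that the AdminClient opened above is never closed. Since org.apache.kafka.clients.admin.AdminClient is AutoCloseable, the topic checks could be wrapped in try-with-resources; a minimal sketch of that variant (assuming the same test-harness fields are in scope):

try (AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps())) {
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    // Topic listing and cleanup-policy checks as in the test body above.
    assertThat(topicClient.listTopicNames().size(), equalTo(5));
    assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(KafkaTopicClient.TopicCleanupPolicy.COMPACT));
}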

Example 2 with KafkaTopicClient

From class WindowingIntTest, method shouldAggregateSessionWindow:

@Test
public void shouldAggregateSessionWindow() throws Exception {
    testHarness.publishTestData(topicName, dataProvider, now);
    final String streamName = "SESSION_AGGTEST";
    final String queryString = String.format("CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s GROUP BY ORDERID;", streamName, "ORDERID, COUNT(*), SUM(ORDERUNITS)", "SESSION (10 SECONDS)");
    ksqlContext.sql(queryString);
    Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
    final GenericRow expectedResults = new GenericRow(Arrays.asList(null, null, "ORDER_6", 6, /* 6 x items */ 420.0));
    final Map<String, GenericRow> results = new HashMap<>();
    TestUtils.waitForCondition(() -> {
        final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(streamName, resultSchema, datasetOneMetaData.size(), new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
        updateResults(results, windowedResults);
        final GenericRow actual = results.get("ORDER_6");
        return expectedResults.equals(actual) && results.size() == 6;
    }, 60000, "didn't receive correct results within timeout");
    AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    Set<String> topicsBeforeCleanup = topicClient.listTopicNames();
    assertThat("Expected 5 topics but found " + topicsBeforeCleanup.size(), topicsBeforeCleanup.size(), equalTo(5));
    // Closing the query should clean up the topics it created internally.
    QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
    queryMetadata.close();
    Set<String> topicsAfterCleanup = topicClient.listTopicNames();
    assertThat("Expected 3 topics after cleanup but found " + topicsAfterCleanup.size(), topicsAfterCleanup.size(), equalTo(3));
    assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
Imports used: io.confluent.common.utils.IntegrationTest, io.confluent.ksql.GenericRow, io.confluent.ksql.util.KafkaTopicClient, io.confluent.ksql.util.KafkaTopicClientImpl, io.confluent.ksql.util.QueryMetadata, java.util.HashMap, org.apache.kafka.clients.admin.AdminClient, org.apache.kafka.common.serialization.StringDeserializer, org.apache.kafka.connect.data.Schema, org.apache.kafka.streams.kstream.Windowed, org.junit.Test
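
The updateResults helper is part of the test class and not shown here. A plausible reconstruction (hypothetical, for illustration only) simply unwraps each windowed key into the plain results map, since Windowed#key() returns the underlying record key:

private static void updateResults(Map<String, GenericRow> results, Map<Windowed<String>, GenericRow> windowedResults) {
    for (Map.Entry<Windowed<String>, GenericRow> entry : windowedResults.entrySet()) {
        // Drop the window bounds; keep only the latest row seen per key.
        results.put(entry.getKey().key(), entry.getValue());
    }
}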

Example 3 with KafkaTopicClient

From class WindowingIntTest, method shouldAggregateHoppingWindow:

@Test
public void shouldAggregateHoppingWindow() throws Exception {
    testHarness.publishTestData(topicName, dataProvider, now);
    final String streamName = "HOPPING_AGGTEST";
    final String queryString = String.format("CREATE TABLE %s AS SELECT %s FROM ORDERS WINDOW %s WHERE ITEMID = 'ITEM_1' GROUP BY ITEMID;", streamName, "ITEMID, COUNT(ITEMID), SUM(ORDERUNITS)", "HOPPING ( SIZE 10 SECONDS, ADVANCE BY 5 SECONDS)");
    ksqlContext.sql(queryString);
    Schema resultSchema = ksqlContext.getMetaStore().getSource(streamName).getSchema();
    final GenericRow expected = new GenericRow(Arrays.asList(null, null, "ITEM_1", 2, /* 2 x items */ 20.0));
    final Map<String, GenericRow> results = new HashMap<>();
    TestUtils.waitForCondition(() -> {
        final Map<Windowed<String>, GenericRow> windowedResults = testHarness.consumeData(streamName, resultSchema, 1, new TimeWindowedDeserializer<>(new StringDeserializer()), 1000);
        updateResults(results, windowedResults);
        final GenericRow actual = results.get("ITEM_1");
        return expected.equals(actual);
    }, 60000, "didn't receive correct results within timeout");
    AdminClient adminClient = AdminClient.create(testHarness.ksqlConfig.getKsqlStreamConfigProps());
    KafkaTopicClient topicClient = new KafkaTopicClientImpl(adminClient);
    Set<String> topicsBeforeCleanup = topicClient.listTopicNames();
    assertThat("Expected 5 topics but found " + topicsBeforeCleanup.size(), topicsBeforeCleanup.size(), equalTo(5));
    // Closing the query should clean up the topics it created internally.
    QueryMetadata queryMetadata = ksqlContext.getRunningQueries().iterator().next();
    queryMetadata.close();
    Set<String> topicsAfterCleanup = topicClient.listTopicNames();
    assertThat("Expected 3 topics after cleanup but found " + topicsAfterCleanup.size(), topicsAfterCleanup.size(), equalTo(3));
    assertThat(topicClient.getTopicCleanupPolicy(streamName), equalTo(KafkaTopicClient.TopicCleanupPolicy.DELETE));
}
Imports used: io.confluent.common.utils.IntegrationTest, io.confluent.ksql.GenericRow, io.confluent.ksql.util.KafkaTopicClient, io.confluent.ksql.util.KafkaTopicClientImpl, io.confluent.ksql.util.QueryMetadata, java.util.HashMap, org.apache.kafka.clients.admin.AdminClient, org.apache.kafka.common.serialization.StringDeserializer, org.apache.kafka.connect.data.Schema, org.apache.kafka.streams.kstream.Windowed, org.junit.Test
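
HOPPING (SIZE 10 SECONDS, ADVANCE BY 5 SECONDS) produces overlapping windows, so each record falls into two of them (size divided by advance); the test therefore merges windows per key via updateResults before comparing. The equivalent window definition in the Kafka Streams API would be roughly the following sketch, using the Duration-based methods of org.apache.kafka.streams.kstream.TimeWindows (which may differ from what this KSQL version used internally):

// 10-second windows, a new window starting every 5 seconds.
TimeWindows hopping = TimeWindows.of(Duration.ofSeconds(10)).advanceBy(Duration.ofSeconds(5));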

Example 4 with KafkaTopicClient

From class JoinNodeTest, method shouldBuildTableNodeWithCorrectAutoCommitOffsetPolicy:

@Test
public void shouldBuildTableNodeWithCorrectAutoCommitOffsetPolicy() {
    setupTopicClientExpectations(1, 1);
    buildJoin();
    KsqlConfig ksqlConfig = mock(KsqlConfig.class);
    KafkaTopicClient kafkaTopicClient = mock(KafkaTopicClient.class);
    FunctionRegistry functionRegistry = mock(FunctionRegistry.class);
    class RightTable extends PlanNode {

        final Schema schema;

        public RightTable(final PlanNodeId id, Schema schema) {
            super(id);
            this.schema = schema;
        }

        @Override
        public Schema getSchema() {
            return schema;
        }

        @Override
        public Field getKeyField() {
            return null;
        }

        @Override
        public List<PlanNode> getSources() {
            return null;
        }

        @Override
        public SchemaKStream buildStream(StreamsBuilder builder, KsqlConfig ksqlConfig, KafkaTopicClient kafkaTopicClient, FunctionRegistry functionRegistry, Map<String, Object> props, SchemaRegistryClient schemaRegistryClient) {
            // Verify that tableForJoin() forced the table side of the join to
            // consume from the earliest offset.
            if (props.containsKey(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG) && props.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toString().equalsIgnoreCase("EARLIEST")) {
                return mock(SchemaKTable.class);
            } else {
                throw new KsqlException("auto.offset.reset should be set to EARLIEST.");
            }
        }

        @Override
        protected int getPartitions(KafkaTopicClient kafkaTopicClient) {
            return 1;
        }
    }
    RightTable rightTable = new RightTable(new PlanNodeId("1"), joinNode.getRight().getSchema());
    JoinNode testJoinNode = new JoinNode(joinNode.getId(), joinNode.getType(), joinNode.getLeft(), rightTable, joinNode.getLeftKeyFieldName(), joinNode.getRightKeyFieldName(), joinNode.getLeftAlias(), joinNode.getRightAlias());
    testJoinNode.tableForJoin(builder, ksqlConfig, kafkaTopicClient, functionRegistry, new HashMap<>(), new MockSchemaRegistryClient());
}
Imports used: io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient, io.confluent.kafka.schemaregistry.client.SchemaRegistryClient, io.confluent.ksql.function.FunctionRegistry, io.confluent.ksql.util.KafkaTopicClient, io.confluent.ksql.util.KsqlConfig, io.confluent.ksql.util.KsqlException, java.util.HashMap, java.util.Map, org.apache.kafka.connect.data.Schema, org.apache.kafka.streams.StreamsBuilder, org.junit.Test
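
The stub's check mirrors the contract under test: tableForJoin is expected to populate the streams properties with an earliest auto.offset.reset so the table side of the join reads its full history. On the caller side that amounts to something like this sketch (hypothetical, for illustration):

Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
// With this entry present, the RightTable stub above returns a mocked
// SchemaKTable instead of throwing a KsqlException.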

Example 5 with KafkaTopicClient

From class KsqlStructuredDataOutputNodeTest, method shouldCreateSinkWithCorrectCleanupPolicyStream:

@Test
public void shouldCreateSinkWithCorrectCleanupPolicyStream() {
    KafkaTopicClient topicClientForWindowTable = EasyMock.mock(KafkaTopicClient.class);
    StreamsBuilder streamsBuilder = new StreamsBuilder();
    // Record the expectation: the sink topic "output" is created with
    // 4 partitions and a replication factor of 3.
    topicClientForWindowTable.createTopic("output", 4, (short) 3, Collections.emptyMap());
    EasyMock.replay(topicClientForWindowTable);
    SchemaKStream schemaKStream = outputNode.buildStream(streamsBuilder, ksqlConfig, topicClientForWindowTable, new FunctionRegistry(), new HashMap<>(), new MockSchemaRegistryClient());
    assertThat(schemaKStream, instanceOf(SchemaKStream.class));
    // Pass the mock explicitly: EasyMock.verify() with no arguments verifies nothing.
    EasyMock.verify(topicClientForWindowTable);
}
Imports used: io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient, io.confluent.ksql.function.FunctionRegistry, io.confluent.ksql.structured.SchemaKStream, io.confluent.ksql.util.KafkaTopicClient, org.apache.kafka.streams.StreamsBuilder, org.junit.Test
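
This is EasyMock's record-replay-verify cycle: calls made before replay() record expectations, and verify(mock) fails if any recorded call did not happen. A companion table-sink test would presumably expect a compacted topic instead; a sketch of that expectation (an assumption, using the standard org.apache.kafka.common.config.TopicConfig constants rather than anything confirmed from this project):

topicClientForWindowTable.createTopic("output", 4, (short) 3,
    Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT));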

Aggregations (usage counts)

KafkaTopicClient (io.confluent.ksql.util.KafkaTopicClient): 13
Test (org.junit.Test): 10
KafkaTopicClientImpl (io.confluent.ksql.util.KafkaTopicClientImpl): 8
HashMap (java.util.HashMap): 7
AdminClient (org.apache.kafka.clients.admin.AdminClient): 7
GenericRow (io.confluent.ksql.GenericRow): 5
IntegrationTest (io.confluent.common.utils.IntegrationTest): 4
MockSchemaRegistryClient (io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient): 4
FunctionRegistry (io.confluent.ksql.function.FunctionRegistry): 4
KsqlConfig (io.confluent.ksql.util.KsqlConfig): 4
QueryMetadata (io.confluent.ksql.util.QueryMetadata): 4
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 4
Schema (org.apache.kafka.connect.data.Schema): 4
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 4
KsqlEngine (io.confluent.ksql.KsqlEngine): 3
SchemaKStream (io.confluent.ksql.structured.SchemaKStream): 3
StreamedQueryResource (io.confluent.ksql.rest.server.resources.streaming.StreamedQueryResource): 2
SchemaKTable (io.confluent.ksql.structured.SchemaKTable): 2
KsqlException (io.confluent.ksql.util.KsqlException): 2
Map (java.util.Map): 2