Example 11 with CommandId

Use of io.confluent.ksql.rest.entity.CommandId in project ksql by confluentinc.

From the class InteractiveStatementExecutorTest, method tryDropThatViolatesReferentialIntegrity.

private void tryDropThatViolatesReferentialIntegrity() {
    final Command dropStreamCommand1 = commandWithPlan("drop stream pageview;", ksqlConfig.getAllConfigPropsWithSecretsObfuscated());
    final CommandId dropStreamCommandId1 = new CommandId(CommandId.Type.STREAM, "_PAGEVIEW", CommandId.Action.DROP);
    handleStatement(statementExecutor, dropStreamCommand1, dropStreamCommandId1, Optional.empty(), 0);
    // DROP statement should fail since the stream is being used.
    final Optional<CommandStatus> dropStreamCommandStatus1 = statementExecutor.getStatus(dropStreamCommandId1);
    Assert.assertTrue(dropStreamCommandStatus1.isPresent());
    assertThat(dropStreamCommandStatus1.get().getStatus(), CoreMatchers.equalTo(CommandStatus.Status.ERROR));
    assertThat(dropStreamCommandStatus1.get().getMessage(), containsString("io.confluent.ksql.util.KsqlReferentialIntegrityException: Cannot drop PAGEVIEW."));
    assertThat(dropStreamCommandStatus1.get().getMessage(), containsString("The following queries read from this source: [CTAS_TABLE1_2, CSAS_USER1PV_1]."));
    assertThat(dropStreamCommandStatus1.get().getMessage(), containsString("The following queries write into this source: []."));
    assertThat(dropStreamCommandStatus1.get().getMessage(), containsString("You need to terminate them before dropping PAGEVIEW."));
    final Command dropStreamCommand2 = commandWithPlan("drop stream user1pv;", ksqlConfig.getAllConfigPropsWithSecretsObfuscated());
    final CommandId dropStreamCommandId2 = new CommandId(CommandId.Type.STREAM, "_user1pv", CommandId.Action.DROP);
    handleStatement(statementExecutor, dropStreamCommand2, dropStreamCommandId2, Optional.empty(), 0);
    // DROP statement should fail since the stream is being used.
    final Optional<CommandStatus> dropStreamCommandStatus2 = statementExecutor.getStatus(dropStreamCommandId2);
    assertThat(dropStreamCommandStatus2.isPresent(), equalTo(true));
    assertThat(dropStreamCommandStatus2.get().getStatus(), CoreMatchers.equalTo(CommandStatus.Status.ERROR));
    assertThat(dropStreamCommandStatus2.get().getMessage(), containsString("io.confluent.ksql.util.KsqlReferentialIntegrityException: Cannot drop USER1PV."));
    assertThat(dropStreamCommandStatus2.get().getMessage(), containsString("The following queries read from this source: []."));
    assertThat(dropStreamCommandStatus2.get().getMessage(), containsString("The following queries write into this source: [CSAS_USER1PV_1]."));
    assertThat(dropStreamCommandStatus2.get().getMessage(), containsString("You need to terminate them before dropping USER1PV."));
    final Command dropTableCommand1 = commandWithPlan("drop table table1;", ksqlConfig.getAllConfigPropsWithSecretsObfuscated());
    final CommandId dropTableCommandId1 = new CommandId(CommandId.Type.TABLE, "_TABLE1", CommandId.Action.DROP);
    handleStatement(statementExecutor, dropTableCommand1, dropTableCommandId1, Optional.empty(), 0);
    final Optional<CommandStatus> dropTableCommandStatus1 = statementExecutor.getStatus(dropTableCommandId1);
    // DROP statement should fail since the table is being used.
    Assert.assertTrue(dropTableCommandStatus1.isPresent());
    assertThat(dropTableCommandStatus1.get().getStatus(), CoreMatchers.equalTo(CommandStatus.Status.ERROR));
    assertThat(dropTableCommandStatus1.get().getMessage(), containsString("io.confluent.ksql.util.KsqlReferentialIntegrityException: Cannot drop TABLE1."));
    assertThat(dropTableCommandStatus1.get().getMessage(), containsString("The following queries read from this source: []."));
    assertThat(dropTableCommandStatus1.get().getMessage(), containsString("The following queries write into this source: [CTAS_TABLE1_2]."));
    assertThat(dropTableCommandStatus1.get().getMessage(), containsString("You need to terminate them before dropping TABLE1."));
}
Also used: CommandStatus (io.confluent.ksql.rest.entity.CommandStatus), CommandId (io.confluent.ksql.rest.entity.CommandId)
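
A CommandId identifies a command by entity type, entity name, and action. The following is a minimal sketch constructing the two forms that appear in these examples: the enum-based constructor used above, and the string-based constructor used in the ClientTest examples further down. The wrapper class and the println calls are illustrative only.

import io.confluent.ksql.rest.entity.CommandId;

public class CommandIdSketch {
    public static void main(String[] args) {
        // Enum-based form, as in InteractiveStatementExecutorTest above.
        final CommandId dropId =
            new CommandId(CommandId.Type.STREAM, "_PAGEVIEW", CommandId.Action.DROP);
        // String-based form, as in the ClientTest examples below.
        final CommandId createId = new CommandId("STREAM", "FOO", "CREATE");
        System.out.println(dropId);
        System.out.println(createId);
    }
}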

Example 12 with CommandId

Use of io.confluent.ksql.rest.entity.CommandId in project ksql by confluentinc.

From the class RecoveryTest, method shouldRecoverWhenDropWithSourceConstraintsFoundOnMetastore.

@Test
public void shouldRecoverWhenDropWithSourceConstraintsFoundOnMetastore() {
    // Verify that recovery after an upgrade is not affected when DROP commands appear out of order.
    server1.submitCommands("CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", "CREATE STREAM B AS SELECT * FROM A;", "INSERT INTO B SELECT * FROM A;");
    // ksqlDB does not allow DROP STREAM A here because 'A' is used by 'B'.
    // However, after a ksqlDB upgrade this ordering can legitimately appear in the command topic.
    final Command dropACommand = new Command("DROP STREAM A;", Optional.of(ImmutableMap.of()), Optional.of(ImmutableMap.of()), Optional.of(KsqlPlan.ddlPlanCurrent("DROP STREAM A;", new DropSourceCommand(SourceName.of("A")))), Optional.of(Command.VERSION));
    // Append the DROP STREAM A command manually; running it through submitCommands() would fail on server1.
    commands.add(new QueuedCommand(InternalTopicSerdes.serializer().serialize("", new CommandId(CommandId.Type.STREAM, "`A`", CommandId.Action.DROP)), InternalTopicSerdes.serializer().serialize("", dropACommand), Optional.empty(), (long) commands.size()));
    final KsqlServer recovered = new KsqlServer(commands);
    recovered.recover();
    // The original server still has streams 'A' and 'B' because the DROP statement was never
    // executed there; it was only appended directly to the list of command topic statements.
    assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources().size(), is(2));
    assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("A")));
    assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("B")));
    assertThat(recovered.ksqlEngine.getAllLiveQueries().size(), is(2));
    // The recovered server has stream 'B' only: it replayed the previous CREATE and DROP statements.
    assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources().size(), is(1));
    assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("B")));
    assertThat(recovered.ksqlEngine.getAllLiveQueries().size(), is(2));
}
Also used: CreateStreamCommand (io.confluent.ksql.execution.ddl.commands.CreateStreamCommand), DropSourceCommand (io.confluent.ksql.execution.ddl.commands.DropSourceCommand), CommandId (io.confluent.ksql.rest.entity.CommandId), Test (org.junit.Test)
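
The queued command pairs a serialized CommandId key with a serialized Command value. Below is a minimal sketch of that serialization, lifted from the test above; it assumes dropACommand from the test is in scope, and the import path for InternalTopicSerdes is an assumption based on its usage here.

import io.confluent.ksql.rest.entity.CommandId;
import io.confluent.ksql.rest.server.computation.InternalTopicSerdes;

// The key identifies which entity the command targets; the value is the
// command itself. Both are serialized independently before being enqueued.
final CommandId dropA =
    new CommandId(CommandId.Type.STREAM, "`A`", CommandId.Action.DROP);
final byte[] key = InternalTopicSerdes.serializer().serialize("", dropA);
final byte[] value = InternalTopicSerdes.serializer().serialize("", dropACommand);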

Example 13 with CommandId

Use of io.confluent.ksql.rest.entity.CommandId in project ksql by confluentinc.

From the class RecoveryTest, method shouldRecoverWhenDropWithSourceConstraintsAndCreateSourceAgainFoundOnMetastore.

@Test
public void shouldRecoverWhenDropWithSourceConstraintsAndCreateSourceAgainFoundOnMetastore() {
    // Verify that recovery after an upgrade is not affected when DROP commands appear out of order.
    server1.submitCommands("CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", "CREATE STREAM B AS SELECT * FROM A;");
    // ksqlDB does not allow DROP STREAM A here because 'A' is used by 'B'.
    // However, after a ksqlDB upgrade this ordering can legitimately appear in the command topic.
    final Command dropACommand = new Command("DROP STREAM A;", Optional.of(ImmutableMap.of()), Optional.of(ImmutableMap.of()), Optional.of(KsqlPlan.ddlPlanCurrent("DROP STREAM A;", new DropSourceCommand(SourceName.of("A")))), Optional.of(Command.VERSION));
    // Append the DROP STREAM A command manually; running it through submitCommands() would fail on server1.
    commands.add(new QueuedCommand(InternalTopicSerdes.serializer().serialize("", new CommandId(CommandId.Type.STREAM, "`A`", CommandId.Action.DROP)), InternalTopicSerdes.serializer().serialize("", dropACommand), Optional.empty(), (long) commands.size()));
    // Append CREATE STREAM A again after the DROP
    final Command createACommand = new Command("CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", Optional.of(ImmutableMap.of()), Optional.of(ImmutableMap.of()), Optional.of(KsqlPlan.ddlPlanCurrent("CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", new CreateStreamCommand(SourceName.of("A"), LogicalSchema.builder().valueColumn(ColumnName.of("COLUMN"), SqlTypes.STRING).build(), Optional.empty(), "A", Formats.of(KeyFormat.nonWindowed(FormatInfo.of(FormatFactory.KAFKA.name()), SerdeFeatures.of()).getFormatInfo(), ValueFormat.of(FormatInfo.of(FormatFactory.JSON.name()), SerdeFeatures.of()).getFormatInfo(), SerdeFeatures.of(), SerdeFeatures.of()), Optional.empty(), Optional.of(false), Optional.of(false)))), Optional.of(Command.VERSION));
    // Append the CREATE STREAM A command manually; running it through submitCommands() would fail on server1.
    commands.add(new QueuedCommand(InternalTopicSerdes.serializer().serialize("", new CommandId(CommandId.Type.STREAM, "`A`", CommandId.Action.CREATE)), InternalTopicSerdes.serializer().serialize("", createACommand), Optional.empty(), (long) commands.size()));
    final KsqlServer recovered = new KsqlServer(commands);
    recovered.recover();
    // Original server has both streams
    assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources().size(), is(2));
    assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("A")));
    assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("B")));
    // Recovered server has both streams: 'A' was re-created after the DROP
    assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources().size(), is(2));
    assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("A")));
    assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("B")));
}
Also used: CreateStreamCommand (io.confluent.ksql.execution.ddl.commands.CreateStreamCommand), DropSourceCommand (io.confluent.ksql.execution.ddl.commands.DropSourceCommand), CommandId (io.confluent.ksql.rest.entity.CommandId), Test (org.junit.Test)
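
To make the recovery semantics explicit, the recovered server replays the command topic in offset order. The trace below is a conceptual summary of this test, not code from the project:

// 1. CREATE STREAM A ...           -> A exists
// 2. CREATE STREAM B AS SELECT ... -> B exists, reading from A
// 3. DROP STREAM A;                -> A removed; recovery applies the DROP
//                                     even though B reads from A
// 4. CREATE STREAM A ...           -> A re-created
// Net result after recovery: both A and B present, as the assertions verify.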

Example 14 with CommandId

Use of io.confluent.ksql.rest.entity.CommandId in project ksql by confluentinc.

From the class ClientTest, method shouldSendSessionVariablesToKsqlEndpoint.

@Test
public void shouldSendSessionVariablesToKsqlEndpoint() throws Exception {
    // Given:
    javaClient.define("a", "a");
    final CommandStatusEntity entity = new CommandStatusEntity("CSAS;", new CommandId("STREAM", "FOO", "CREATE"), new CommandStatus(CommandStatus.Status.SUCCESS, "Success", Optional.of(new QueryId("CSAS_0"))), 0L);
    testEndpoints.setKsqlEndpointResponse(Collections.singletonList(entity));
    // When:
    javaClient.executeStatement("CSAS;").get();
    // Then:
    assertThat(testEndpoints.getLastSessionVariables(), is(new JsonObject().put("a", "a")));
}
Also used: QueryId (io.confluent.ksql.query.QueryId), PushQueryId (io.confluent.ksql.rest.entity.PushQueryId), JsonObject (io.vertx.core.json.JsonObject), CommandStatus (io.confluent.ksql.rest.entity.CommandStatus), CommandId (io.confluent.ksql.rest.entity.CommandId), CommandStatusEntity (io.confluent.ksql.rest.entity.CommandStatusEntity), BaseApiTest (io.confluent.ksql.api.BaseApiTest), Test (org.junit.Test)
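
Outside the test harness, the same client-side pattern looks like the sketch below. The host and port are placeholder assumptions, and the CSAS statement text stands in for the stubbed "CSAS;" used by the test.

import io.confluent.ksql.api.client.Client;
import io.confluent.ksql.api.client.ClientOptions;

// Build a client against a local ksqlDB server (placeholder host/port).
final ClientOptions options = ClientOptions.create()
    .setHost("localhost")
    .setPort(8088);
final Client client = Client.create(options);

// Session variables defined here are sent with every subsequent request,
// which is what the test above asserts on the server side.
client.define("a", "a");
client.executeStatement("CREATE STREAM FOO AS SELECT * FROM BAR;").get();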

Example 15 with CommandId

Use of io.confluent.ksql.rest.entity.CommandId in project ksql by confluentinc.

From the class ClientTest, method shouldExecuteStatementWithQueryId.

@Test
public void shouldExecuteStatementWithQueryId() throws Exception {
    // Given
    final CommandStatusEntity entity = new CommandStatusEntity("CSAS;", new CommandId("STREAM", "FOO", "CREATE"), new CommandStatus(CommandStatus.Status.SUCCESS, "Success", Optional.of(new QueryId("CSAS_0"))), 0L);
    testEndpoints.setKsqlEndpointResponse(Collections.singletonList(entity));
    final Map<String, Object> properties = ImmutableMap.of("auto.offset.reset", "earliest");
    // When
    final ExecuteStatementResult result = javaClient.executeStatement("CSAS;", properties).get();
    // Then
    assertThat(testEndpoints.getLastSql(), is("CSAS;"));
    assertThat(testEndpoints.getLastProperties(), is(new JsonObject().put("auto.offset.reset", "earliest")));
    assertThat(result.queryId(), is(Optional.of("CSAS_0")));
}
Also used: QueryId (io.confluent.ksql.query.QueryId), PushQueryId (io.confluent.ksql.rest.entity.PushQueryId), JsonObject (io.vertx.core.json.JsonObject), CommandStatus (io.confluent.ksql.rest.entity.CommandStatus), CommandId (io.confluent.ksql.rest.entity.CommandId), Matchers.containsString (org.hamcrest.Matchers.containsString), CommandStatusEntity (io.confluent.ksql.rest.entity.CommandStatusEntity), BaseApiTest (io.confluent.ksql.api.BaseApiTest), Test (org.junit.Test)
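
A hedged usage sketch of the same pattern: pass per-statement properties and read the query ID back from the result. It assumes an already-configured client, as in the previous sketch, and an illustrative statement.

import io.confluent.ksql.api.client.ExecuteStatementResult;
import java.util.Map;

final Map<String, Object> properties = Map.of("auto.offset.reset", "earliest");
final ExecuteStatementResult result = client
    .executeStatement("CREATE STREAM FOO AS SELECT * FROM BAR;", properties)
    .get();
// queryId() is only present for statements that start a persistent query.
result.queryId().ifPresent(id -> System.out.println("Started query: " + id));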

Aggregations

CommandId (io.confluent.ksql.rest.entity.CommandId): 29
Test (org.junit.Test): 20
CommandStatus (io.confluent.ksql.rest.entity.CommandStatus): 13
CommandStatusEntity (io.confluent.ksql.rest.entity.CommandStatusEntity): 7
QueryId (io.confluent.ksql.query.QueryId): 6
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 6
BaseApiTest (io.confluent.ksql.api.BaseApiTest): 5
KsqlException (io.confluent.ksql.util.KsqlException): 5
Command (io.confluent.ksql.rest.server.computation.Command): 4
JsonObject (io.vertx.core.json.JsonObject): 4
Matchers.containsString (org.hamcrest.Matchers.containsString): 4
IntegrationTest (io.confluent.common.utils.IntegrationTest): 3
KsqlServerException (io.confluent.ksql.util.KsqlServerException): 3
PersistentQueryMetadata (io.confluent.ksql.util.PersistentQueryMetadata): 3
IOException (java.io.IOException): 3
HashMap (java.util.HashMap): 3
Map (java.util.Map): 3
ExecutionException (java.util.concurrent.ExecutionException): 3
TimeoutException (java.util.concurrent.TimeoutException): 3
KsqlPlan (io.confluent.ksql.engine.KsqlPlan): 2