Use of io.confluent.ksql.execution.ddl.commands.CreateStreamCommand in the ksql project by confluentinc — from the class KsqlPlanV1Test, method shouldReturnCreateAsPersistentQueryTypeOnCreateStream.
@Test
public void shouldReturnCreateAsPersistentQueryTypeOnCreateStream() {
  // Given: a plan built from a mocked CREATE STREAM DDL command plus a query plan
  final CreateStreamCommand createStream = Mockito.mock(CreateStreamCommand.class);
  final KsqlPlanV1 ksqlPlan = new KsqlPlanV1("stmt", Optional.of(createStream), Optional.of(queryPlan1));

  // When:
  final Optional<KsqlConstants.PersistentQueryType> queryType = ksqlPlan.getPersistentQueryType();

  // Then: a DDL command paired with a query plan is classified as CREATE_AS
  assertThat(queryType, is(Optional.of(KsqlConstants.PersistentQueryType.CREATE_AS)));
}
Use of io.confluent.ksql.execution.ddl.commands.CreateStreamCommand in the ksql project by confluentinc — from the class DdlCommandExecTest, method shouldDropStreamIfConstraintExistsAndRestoreIsInProgress.
@Test
public void shouldDropStreamIfConstraintExistsAndRestoreIsInProgress() {
  // Given: stream s1, plus s2 and s3 which each register s1 as a source constraint
  final SourceName s1 = SourceName.of("s1");
  final CreateStreamCommand stream1 = buildCreateStream(s1, SCHEMA, false, false);
  final CreateStreamCommand stream2 = buildCreateStream(SourceName.of("s2"), SCHEMA, false, false);
  final CreateStreamCommand stream3 = buildCreateStream(SourceName.of("s3"), SCHEMA, false, false);
  cmdExec.execute(SQL_TEXT, stream1, true, Collections.emptySet());
  cmdExec.execute(SQL_TEXT, stream2, true, Collections.singleton(s1));
  cmdExec.execute(SQL_TEXT, stream3, true, Collections.singleton(s1));

  // When: dropping s1 while a restore is in progress (final argument = true),
  // even though other streams still reference it
  final DropSourceCommand dropStream = buildDropSourceCommand(s1);
  final DdlCommandResult result = cmdExec.execute(SQL_TEXT, dropStream, false, Collections.emptySet(), true);

  // Then: the drop succeeds despite the existing constraints
  assertThat(result.isSuccess(), is(true));
  assertThat(result.getMessage(), equalTo(String.format("Source %s (topic: %s) was dropped.", STREAM_NAME, TOPIC_NAME)));
}
Use of io.confluent.ksql.execution.ddl.commands.CreateStreamCommand in the ksql project by confluentinc — from the class RecoveryTest, method shouldRecoverWhenDropWithSourceConstraintsAndCreateSourceAgainFoundOnMetastore.
@Test
public void shouldRecoverWhenDropWithSourceConstraintsAndCreateSourceAgainFoundOnMetastore() {
// Verify that an upgrade will not be affected if DROP commands are not in order.
server1.submitCommands("CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", "CREATE STREAM B AS SELECT * FROM A;");
// ksqlDB does not allow a DROP STREAM A because 'A' is used by 'B'.
// However, if a ksqlDB upgrade is done, then this order can be possible.
final Command dropACommand = new Command("DROP STREAM A;", Optional.of(ImmutableMap.of()), Optional.of(ImmutableMap.of()), Optional.of(KsqlPlan.ddlPlanCurrent("DROP STREAM A;", new DropSourceCommand(SourceName.of("A")))), Optional.of(Command.VERSION));
// Add the DROP STREAM A manually to prevent server1 to fail if done on submitCommands()
commands.add(new QueuedCommand(InternalTopicSerdes.serializer().serialize("", new CommandId(CommandId.Type.STREAM, "`A`", CommandId.Action.DROP)), InternalTopicSerdes.serializer().serialize("", dropACommand), Optional.empty(), (long) commands.size()));
// Add CREATE STREAM after the DROP again
final Command createACommand = new Command("CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", Optional.of(ImmutableMap.of()), Optional.of(ImmutableMap.of()), Optional.of(KsqlPlan.ddlPlanCurrent("CREATE STREAM A (COLUMN STRING) WITH (KAFKA_TOPIC='A', VALUE_FORMAT='JSON');", new CreateStreamCommand(SourceName.of("A"), LogicalSchema.builder().valueColumn(ColumnName.of("COLUMN"), SqlTypes.STRING).build(), Optional.empty(), "A", Formats.of(KeyFormat.nonWindowed(FormatInfo.of(FormatFactory.KAFKA.name()), SerdeFeatures.of()).getFormatInfo(), ValueFormat.of(FormatInfo.of(FormatFactory.JSON.name()), SerdeFeatures.of()).getFormatInfo(), SerdeFeatures.of(), SerdeFeatures.of()), Optional.empty(), Optional.of(false), Optional.of(false)))), Optional.of(Command.VERSION));
// Add the CREATE STREAM A manually to prevent server1 to fail if done on submitCommands()
commands.add(new QueuedCommand(InternalTopicSerdes.serializer().serialize("", new CommandId(CommandId.Type.STREAM, "`A`", CommandId.Action.CREATE)), InternalTopicSerdes.serializer().serialize("", createACommand), Optional.empty(), (long) commands.size()));
// Replay the full command topic (CREATE A, CREATE B, DROP A, CREATE A) on a fresh server
final KsqlServer recovered = new KsqlServer(commands);
recovered.recover();
// Original server has both streams
assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources().size(), is(2));
assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("A")));
assertThat(server1.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("B")));
// Recovered server also ends up with both streams: the out-of-order DROP of 'A'
// is replayed, but the trailing CREATE STREAM A command re-creates it
assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources().size(), is(2));
assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("A")));
assertThat(recovered.ksqlEngine.getMetaStore().getAllDataSources(), hasKey(SourceName.of("B")));
}
Use of io.confluent.ksql.execution.ddl.commands.CreateStreamCommand in the ksql project by confluentinc — from the class CreateSourceFactoryTest, method shouldCreateStreamCommandFromNodeOutput.
@Test
public void shouldCreateStreamCommandFromNodeOutput() {
  // Given: a topic mock exposing known name and key/value formats
  final KsqlTopic topic = mock(KsqlTopic.class);
  when(topic.getKafkaTopicName()).thenReturn(TOPIC_NAME);
  when(topic.getKeyFormat()).thenReturn(SOME_KEY_FORMAT);
  when(topic.getValueFormat()).thenReturn(SOME_VALUE_FORMAT);

  // ... and a sink output node wired to that topic
  final KsqlStructuredDataOutputNode node = mock(KsqlStructuredDataOutputNode.class);
  when(node.getSinkName()).thenReturn(Optional.of(SOME_NAME));
  when(node.getSchema()).thenReturn(EXPECTED_SCHEMA);
  when(node.getTimestampColumn()).thenReturn(Optional.of(TIMESTAMP_COLUMN));
  when(node.getKsqlTopic()).thenReturn(topic);

  // When:
  final CreateStreamCommand command = createSourceFactory.createStreamCommand(node);

  // Then: every field of the command mirrors the node/topic it was built from
  assertThat(command.getSourceName(), is(SOME_NAME));
  assertThat(command.getSchema(), is(EXPECTED_SCHEMA));
  assertThat(command.getTimestampColumn(), is(Optional.of(TIMESTAMP_COLUMN)));
  assertThat(command.getTopicName(), is(TOPIC_NAME));
  assertThat(command.getFormats(), is(Formats.from(topic)));
  assertThat(command.getWindowInfo(), is(Optional.empty()));
  assertThat(command.isOrReplace(), is(false));
}
Use of io.confluent.ksql.execution.ddl.commands.CreateStreamCommand in the ksql project by confluentinc — from the class CreateSourceFactoryTest, method shouldNotThrowOnKeyColumnThatIsNotCalledRowKey.
@Test
public void shouldNotThrowOnKeyColumnThatIsNotCalledRowKey() {
  // Given: a CREATE STREAM whose single KEY column uses a custom name rather than ROWKEY
  final TableElements elements = TableElements.of(tableElement("someKey", new Type(SqlTypes.STRING), KEY_CONSTRAINT));
  final CreateStream statement = new CreateStream(SOME_NAME, elements, false, true, withProperties, false);

  // When: building the command (must not throw)
  final CreateStreamCommand command = createSourceFactory.createStreamCommand(statement, ksqlConfig);

  // Then: the custom-named key column lands in the key schema
  assertThat(command.getSchema().key(), contains(keyColumn(ColumnName.of("someKey"), SqlTypes.STRING)));
}
Aggregations