Example usage of io.confluent.ksql.util.KsqlStatementException in the confluentinc/ksql project:
class KsqlEngineTest, method shouldFailDropStreamWhenAnInsertQueryIsReadingTheStream.
@Test
public void shouldFailDropStreamWhenAnInsertQueryIsReadingTheStream() {
  // Given: an insert query (INSERTQUERY_2) is actively reading from stream BAR.
  final String setup = "create stream bar as select * from test1;"
      + "create stream foo as select * from test1;"
      + "insert into foo select * from bar;";
  KsqlEngineTestUtil.execute(serviceContext, ksqlEngine, setup, ksqlConfig, Collections.emptyMap());

  // When: attempting to drop the stream the insert query reads from.
  final KsqlStatementException e = assertThrows(
      KsqlStatementException.class,
      () -> KsqlEngineTestUtil.execute(
          serviceContext, ksqlEngine, "drop stream bar;", ksqlConfig, Collections.emptyMap()));

  // Then: the drop is rejected and the blocking query is named in the message.
  final String expected = "Cannot drop BAR.\n"
      + "The following queries read from this source: [INSERTQUERY_2].\n"
      + "The following queries write into this source: [].\n"
      + "You need to terminate them before dropping BAR.";
  assertThat(e, rawMessage(is(expected)));
  assertThat(e, statementText(is("drop stream bar;")));
}
Example usage of io.confluent.ksql.util.KsqlStatementException in the confluentinc/ksql project:
class KsqlEngineTest, method shouldThrowIfStatementMissingTopicConfig.
@Test
public void shouldThrowIfStatementMissingTopicConfig() {
  // Given: CREATE statements whose WITH clause omits the mandatory KAFKA_TOPIC property,
  // covering both TABLE and STREAM with both AVRO and JSON value formats.
  final List<ParsedStatement> parsed = parse(
      "CREATE TABLE FOO (viewtime BIGINT, pageid VARCHAR) WITH (VALUE_FORMAT='AVRO', KEY_FORMAT='KAFKA');"
          + "CREATE STREAM FOO (viewtime BIGINT, pageid VARCHAR) WITH (VALUE_FORMAT='AVRO', KEY_FORMAT='KAFKA');"
          + "CREATE TABLE FOO (viewtime BIGINT, pageid VARCHAR) WITH (VALUE_FORMAT='JSON', KEY_FORMAT='KAFKA');"
          + "CREATE STREAM FOO (viewtime BIGINT, pageid VARCHAR) WITH (VALUE_FORMAT='JSON', KEY_FORMAT='KAFKA');");

  for (final ParsedStatement statement : parsed) {
    // When / Then: preparing each statement must fail with the missing-property error.
    // assertThrows replaces the original try/Assert.fail()/catch pattern: it is consistent
    // with the other tests in this class and reports a clear message both when nothing is
    // thrown and when an unexpected exception type is thrown (the bare fail() reported nothing).
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> ksqlEngine.prepare(statement));
    assertThat(e.getMessage(),
        containsString("Missing required property \"KAFKA_TOPIC\" which has no default value."));
  }
}
Example usage of io.confluent.ksql.util.KsqlStatementException in the confluentinc/ksql project:
class KsqlEngineTest, method shouldFailDropStreamWhenMultipleStreamsAreReadingTheTable.
@Test
public void shouldFailDropStreamWhenMultipleStreamsAreReadingTheTable() {
  // Given: two downstream streams (FOO, FOO2) are derived from stream BAR.
  final String setup = "create stream bar as select * from test1;"
      + "create stream foo as select * from bar;"
      + "create stream foo2 as select * from bar;";
  KsqlEngineTestUtil.execute(serviceContext, ksqlEngine, setup, ksqlConfig, Collections.emptyMap());

  // When: attempting to drop the source that both downstream streams read from.
  final KsqlStatementException e = assertThrows(
      KsqlStatementException.class,
      () -> KsqlEngineTestUtil.execute(
          serviceContext, ksqlEngine, "drop stream bar;", ksqlConfig, Collections.emptyMap()));

  // Then: the drop is rejected and both dependent streams are listed.
  final String expected = "Cannot drop BAR.\n"
      + "The following streams and/or tables read from this source: [FOO, FOO2].\n"
      + "You need to drop them before dropping BAR.";
  assertThat(e, rawMessage(is(expected)));
  assertThat(e, statementText(is("drop stream bar;")));
}
Example usage of io.confluent.ksql.util.KsqlStatementException in the confluentinc/ksql project:
class KsqlEngineTest, method shouldThrowIfSchemaNotPresent.
@Test
public void shouldThrowIfSchemaNotPresent() {
  // Given: the backing topic exists, but the CREATE statement declares no columns.
  givenTopicsExist("bar");
  final String statement =
      "create stream bar with (key_format='kafka', value_format='avro', kafka_topic='bar');";

  // When: executing the column-less CREATE STREAM statement.
  final KsqlStatementException e = assertThrows(
      KsqlStatementException.class,
      () -> execute(serviceContext, ksqlEngine, statement, ksqlConfig, emptyMap()));

  // Then: the engine rejects it, citing the missing column definitions.
  assertThat(e, rawMessage(containsString("The statement does not define any columns.")));
  assertThat(e, statementText(is(statement)));
}
Example usage of io.confluent.ksql.util.KsqlStatementException in the confluentinc/ksql project:
class KsqlEngineTest, method shouldThrowOnInsertIntoWithKeyMismatch.
@Test
public void shouldThrowOnInsertIntoWithKeyMismatch() {
// Given: sink stream BAR inherits ORDERS' key column (ORDERTIME per the Sink-schema
// assertion below), while the INSERT repartitions by ORDERID.
execute(serviceContext, ksqlEngine, "create stream bar as select * from orders;", ksqlConfig, emptyMap());
// When:
final KsqlStatementException e = assertThrows(KsqlStatementException.class, () -> KsqlEngineTestUtil.execute(serviceContext, ksqlEngine, "insert into bar select * from orders partition by orderid;", ksqlConfig, Collections.emptyMap()));
// Then: the key-column mismatch between the query result and the sink is reported.
// Only prefixes of the schemas are asserted (containsString), keeping the test robust
// to value-column changes in the ORDERS fixture.
assertThat(e, rawMessage(containsString("Incompatible schema between results and sink.")));
assertThat(e, rawMessage(containsString("Result schema is `ORDERID` BIGINT KEY, ")));
assertThat(e, rawMessage(containsString("Sink schema is `ORDERTIME` BIGINT KEY, ")));
assertThat(e, statementText(is("insert into bar select * from orders partition by orderid;")));
}
Aggregations