Use of io.confluent.ksql.execution.streams.materialization.Row in project ksql by confluentinc.
The class KsMaterializationFunctionalTest, method shouldQueryMaterializedTableForAggregatedTable.
@Test
public void shouldQueryMaterializedTableForAggregatedTable() {
    // Given:
    final PersistentQueryMetadata query = executeQuery(
        "CREATE TABLE " + output + " AS"
            + " SELECT USERID, COUNT(*) FROM " + USER_TABLE
            + " GROUP BY USERID;");
    final LogicalSchema schema = schema("KSQL_COL_0", SqlTypes.BIGINT);
    final Map<String, GenericRow> rows = waitForUniqueUserRows(STRING_DESERIALIZER, schema);

    // When:
    final Materialization materialization = query.getMaterialization(queryId, contextStacker).get();

    // Then:
    assertThat(materialization.windowType(), is(Optional.empty()));
    final MaterializedTable table = materialization.nonWindowed();
    rows.forEach((rowKey, value) -> {
        final GenericKey key = genericKey(rowKey);
        final Iterator<Row> rowIterator = withRetry(() -> table.get(key, PARTITION));
        assertThat(rowIterator.hasNext(), is(true));
        final Row row = rowIterator.next();
        assertThat(row.schema(), is(schema));
        assertThat(row.key(), is(key));
        assertThat(row.value(), is(value));
    });

    final GenericKey key = genericKey("Won't find me");
    assertThat("unknown key", withRetry(() -> table.get(key, PARTITION).hasNext()), is(false));
}
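The withRetry helper these tests lean on is not shown above. A minimal sketch of what such a helper could look like, assuming a simple deadline-and-sleep retry loop; the method name matches the tests, but the timeout, backoff, and retry condition here are assumptions, not the project's actual implementation:

// Hypothetical stand-in for the test class's withRetry helper: retries the
// supplier until it succeeds or a deadline passes, since the materialized
// store may briefly be unavailable (e.g. while Streams is rebalancing).
private static <T> T withRetry(final java.util.function.Supplier<T> supplier) {
    final long deadlineMs = System.currentTimeMillis() + 30_000;
    while (true) {
        try {
            return supplier.get();
        } catch (final RuntimeException e) {
            if (System.currentTimeMillis() >= deadlineMs) {
                throw e; // give up once the deadline passes
            }
            try {
                Thread.sleep(100); // back off briefly before retrying
            } catch (final InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw e;
            }
        }
    }
}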
Use of io.confluent.ksql.execution.streams.materialization.Row in project ksql by confluentinc.
The class KsMaterializationFunctionalTest, method shouldHandleHavingClause.
@Test
public void shouldHandleHavingClause() {
    // Note: HAVING clauses are handled centrally by KsqlMaterialization. This logic will have
    // been installed as part of building the below statement:

    // Given:
    final PersistentQueryMetadata query = executeQuery(
        "CREATE TABLE " + output + " AS"
            + " SELECT USERID, COUNT(*) AS COUNT FROM " + USER_TABLE
            + " GROUP BY USERID"
            + " HAVING SUM(REGISTERTIME) > 2;");
    final LogicalSchema schema = schema("COUNT", SqlTypes.BIGINT);
    final int matches = (int) USER_DATA_PROVIDER.data().values().stream()
        .filter(row -> ((Long) row.get(0)) > 2)
        .count();
    final Map<String, GenericRow> rows = waitForUniqueUserRows(matches, STRING_DESERIALIZER, schema);

    // When:
    final Materialization materialization = query.getMaterialization(queryId, contextStacker).get();

    // Then:
    final MaterializedTable table = materialization.nonWindowed();
    rows.forEach((rowKey, value) -> {
        // Rows passing the HAVING clause:
        final GenericKey key = genericKey(rowKey);
        final List<Row> rowList = withRetry(() -> Lists.newArrayList(table.get(key, PARTITION)));
        assertThat(rowList.size(), is(1));
        assertThat(rowList.get(0).schema(), is(schema));
        assertThat(rowList.get(0).key(), is(key));
        assertThat(rowList.get(0).value(), is(value));
    });
    USER_DATA_PROVIDER.data().entries().stream()
        .filter(e -> !rows.containsKey(e.getKey().get(0)))
        .forEach(e -> {
            // Rows filtered out by the HAVING clause:
            final List<Row> rowList = withRetry(() -> Lists.newArrayList(table.get(e.getKey(), PARTITION)));
            assertThat(rowList.isEmpty(), is(true));
        });
}
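Conceptually, the HAVING logic installed by KsqlMaterialization acts as a post-filter when rows are read back from the state store. A minimal sketch of that idea, assuming a predicate over the row's value; this is an illustration of the concept only, and the predicate below is a hypothetical stand-in, not the compiled HAVING expression KsqlMaterialization actually installs:

// Illustrative only: apply a HAVING-style predicate while draining rows.
final java.util.function.Predicate<Row> havingPredicate =
    row -> ((Long) row.value().get(0)) > 2; // hypothetical stand-in predicate
final List<Row> passing = Streams.stream(table.get(key, PARTITION))
    .filter(havingPredicate)
    .collect(java.util.stream.Collectors.toList());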
Use of io.confluent.ksql.execution.streams.materialization.Row in project ksql by confluentinc.
The class KsMaterializedTableIQv2, method get (full-table scan overload).
@Override
public KsMaterializedQueryResult<Row> get(final int partition, final Optional<Position> position) {
    try {
        final RangeQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = RangeQuery.withNoBounds();
        StateQueryRequest<KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>>> request =
            inStore(stateStore.getStateStoreName())
                .withQuery(query)
                .withPartitions(ImmutableSet.of(partition));
        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }
        final StateQueryResult<KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>>> result =
            stateStore.getKafkaStreams().query(request);
        final QueryResult<KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>>> queryResult =
            result.getPartitionResults().get(partition);
        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        } else if (queryResult.getResult() == null) {
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        } else {
            final KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>> iterator =
                queryResult.getResult();
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Streams.stream(IteratorUtil.onComplete(iterator, iterator::close))
                    .map(keyValue -> Row.of(
                        stateStore.schema(),
                        keyValue.key,
                        keyValue.value.value(),
                        keyValue.value.timestamp()))
                    .iterator(),
                queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to scan materialized table", e);
    }
}
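A hedged usage sketch for the scan above: the caller drains the returned iterator and can read the IQv2 Position for consistency tracking. The getRowIterator accessor name is an assumption inferred from the rowIteratorWithPosition factory used above, and materializedTable is an illustrative reference to a KsMaterializedTableIQv2 instance:

// Hypothetical caller of the full-table scan; accessor name assumed.
final KsMaterializedQueryResult<Row> scan = materializedTable.get(partition, Optional.empty());
final Iterator<Row> rows = scan.getRowIterator();
while (rows.hasNext()) {
    final Row row = rows.next();
    System.out.println(row.key() + " -> " + row.value());
}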
Use of io.confluent.ksql.execution.streams.materialization.Row in project ksql by confluentinc.
The class KsMaterializedTableIQv2, method get (range-scan overload).
// CHECKSTYLE_RULES.OFF: CyclomaticComplexity
@Override
public KsMaterializedQueryResult<Row> get(
    final int partition,
    final GenericKey from,
    final GenericKey to,
    final Optional<Position> position
) {
    // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    try {
        final RangeQuery<GenericKey, ValueAndTimestamp<GenericRow>> query;
        if (from != null && to != null) {
            query = RangeQuery.withRange(from, to);
        } else if (from == null && to != null) {
            query = RangeQuery.withUpperBound(to);
        } else if (from != null && to == null) {
            query = RangeQuery.withLowerBound(from);
        } else {
            query = RangeQuery.withNoBounds();
        }
        StateQueryRequest<KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>>> request =
            inStore(stateStore.getStateStoreName())
                .withQuery(query)
                .withPartitions(ImmutableSet.of(partition));
        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }
        final StateQueryResult<KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>>> result =
            stateStore.getKafkaStreams().query(request);
        final QueryResult<KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>>> queryResult =
            result.getPartitionResults().get(partition);
        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        } else if (queryResult.getResult() == null) {
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        } else {
            final KeyValueIterator<GenericKey, ValueAndTimestamp<GenericRow>> iterator =
                queryResult.getResult();
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Streams.stream(IteratorUtil.onComplete(iterator, iterator::close))
                    .map(keyValue -> Row.of(
                        stateStore.schema(),
                        keyValue.key,
                        keyValue.value.value(),
                        keyValue.value.timestamp()))
                    .iterator(),
                queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to range scan materialized table", e);
    }
}
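The four-way branch above maps nullable bounds onto the matching RangeQuery factory: both bounds give withRange, only an upper bound gives withUpperBound, only a lower bound gives withLowerBound, and neither gives withNoBounds. For example, a caller wanting all keys at or above some key passes a from key and a null to key; variable names below are illustrative:

// Illustrative call: range-scan one partition for keys >= someKey,
// with no position bound (eventual consistency is acceptable here).
final GenericKey someKey = GenericKey.genericKey("user_1");
final KsMaterializedQueryResult<Row> result =
    materializedTable.get(partition, someKey, null, Optional.empty());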
Use of io.confluent.ksql.execution.streams.materialization.Row in project ksql by confluentinc.
The class KeyedTableLookupOperator, method getMatIterator.
private Iterator<Row> getMatIterator(final KsqlKey ksqlKey) {
    if (!(ksqlKey instanceof KeyConstraint)) {
        throw new IllegalStateException(String.format(
            "Keyed lookup queries should be done with key constraints: %s", ksqlKey.toString()));
    }
    final KeyConstraint keyConstraintKey = (KeyConstraint) ksqlKey;
    final Iterator<Row> result;
    if (keyConstraintKey.getOperator() == ConstraintOperator.EQUAL) {
        result = mat.nonWindowed().get(ksqlKey.getKey(), nextLocation.getPartition(), consistencyOffsetVector);
    } else if (keyConstraintKey.getOperator() == ConstraintOperator.GREATER_THAN
        || keyConstraintKey.getOperator() == ConstraintOperator.GREATER_THAN_OR_EQUAL) {
        // The underlying store always returns keys inclusive of the endpoints;
        // filtering is used to trim the start of the range in the ">" case.
        final GenericKey fromKey = keyConstraintKey.getKey();
        final GenericKey toKey = null;
        result = mat.nonWindowed().get(nextLocation.getPartition(), fromKey, toKey, consistencyOffsetVector);
    } else if (keyConstraintKey.getOperator() == ConstraintOperator.LESS_THAN
        || keyConstraintKey.getOperator() == ConstraintOperator.LESS_THAN_OR_EQUAL) {
        // The underlying store always returns keys inclusive of the endpoints;
        // filtering is used to trim the end of the range in the "<" case.
        final GenericKey fromKey = null;
        final GenericKey toKey = keyConstraintKey.getKey();
        result = mat.nonWindowed().get(nextLocation.getPartition(), fromKey, toKey, consistencyOffsetVector);
    } else {
        throw new IllegalStateException(String.format(
            "Invalid comparator type %s", keyConstraintKey.getOperator()));
    }
    return result;
}
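The comments above note that the store's range scan is endpoint-inclusive, so the strict operators (">" and "<") need a post-filter to drop the boundary row. A minimal sketch of that trimming, assuming key equality identifies the boundary; this helper is hypothetical, not the operator's actual code:

// Hypothetical helper: drop the endpoint row returned by an inclusive
// range scan when the constraint operator is strict (">" or "<").
private static Iterator<Row> trimExclusiveBound(
    final Iterator<Row> rows,
    final GenericKey bound,
    final boolean strict
) {
    if (!strict || bound == null) {
        return rows; // inclusive operators keep the endpoint row
    }
    return Streams.stream(rows)
        .filter(row -> !row.key().equals(bound))
        .iterator();
}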