Use of io.confluent.ksql.execution.streams.materialization.WindowedRow in project ksql by confluentinc.
The class KsMaterializedWindowTableIQv2, method get.
public KsMaterializedQueryResult<WindowedRow> get(
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
  try {
    final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
    final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);

    final WindowRangeQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
        WindowRangeQuery.withWindowStartRange(lower, upper);

    StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> request =
        inStore(stateStore.getStateStoreName()).withQuery(query);
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }

    final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> result =
        stateStore.getKafkaStreams().query(request);
    final QueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> queryResult =
        result.getPartitionResults().get(partition);
    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    }

    final KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>> iterator =
        queryResult.getResult();

    return KsMaterializedQueryResult.rowIteratorWithPosition(
        Streams.stream(IteratorUtil.onComplete(iterator, iterator::close))
            .map(next -> {
              final Instant windowStart = next.key.window().startTime();
              if (!windowStartBounds.contains(windowStart)) {
                return null;
              }
              final Instant windowEnd = next.key.window().endTime();
              if (!windowEndBounds.contains(windowEnd)) {
                return null;
              }
              final TimeWindow window =
                  new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
              final WindowedRow row = WindowedRow.of(
                  stateStore.schema(),
                  new Windowed<>(next.key.key(), window),
                  next.value.value(),
                  next.value.timestamp());
              return row;
            })
            .filter(Objects::nonNull)
            .iterator(),
        queryResult.getPosition());
  } catch (final NotUpToBoundException | MaterializationException e) {
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
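The snippet above is built on the Kafka Streams interactive-queries v2 (IQv2) API: it issues a WindowRangeQuery bounded by window start time, reads the result for a single partition, and then re-applies the exact start/end bounds per row, since the store-level query only filters on window start. A minimal, self-contained sketch of that underlying IQv2 pattern follows; the store name "my-window-store", the String/Long types, and the already-running kafkaStreams instance are assumptions for illustration, not ksql code.

import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.query.QueryResult;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;
import org.apache.kafka.streams.query.WindowRangeQuery;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.ValueAndTimestamp;

// A minimal sketch, assuming a running KafkaStreams topology with a timestamped
// window store named "my-window-store" (the DSL default store type), keyed by
// String with Long values.
public final class WindowRangeQueryExample {

  public static void printLastHour(final KafkaStreams kafkaStreams, final int partition) {
    final Instant upper = Instant.now();
    final Instant lower = upper.minus(Duration.ofHours(1));

    // Ask for every window whose start time falls within [lower, upper].
    final WindowRangeQuery<String, ValueAndTimestamp<Long>> query =
        WindowRangeQuery.withWindowStartRange(lower, upper);

    final StateQueryRequest<KeyValueIterator<Windowed<String>, ValueAndTimestamp<Long>>> request =
        StateQueryRequest.inStore("my-window-store").withQuery(query);

    final StateQueryResult<KeyValueIterator<Windowed<String>, ValueAndTimestamp<Long>>> result =
        kafkaStreams.query(request);

    // Results come back per partition; pick the one we were asked about.
    final QueryResult<KeyValueIterator<Windowed<String>, ValueAndTimestamp<Long>>> partitionResult =
        result.getPartitionResults().get(partition);
    if (partitionResult == null || partitionResult.isFailure()) {
      throw new IllegalStateException("Window range query failed for partition " + partition);
    }

    // The iterator must be closed; try-with-resources takes care of it.
    try (KeyValueIterator<Windowed<String>, ValueAndTimestamp<Long>> it = partitionResult.getResult()) {
      while (it.hasNext()) {
        final KeyValue<Windowed<String>, ValueAndTimestamp<Long>> entry = it.next();
        System.out.printf("key=%s window=%s value=%s%n",
            entry.key.key(), entry.key.window(), entry.value.value());
      }
    }
  }
}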
Use of io.confluent.ksql.execution.streams.materialization.WindowedRow in project ksql by confluentinc.
The class WindowedTableScanOperator, method next.
@Override
public Object next() {
  if (shouldCancelOperations.isDone()) {
    return null;
  }

  while (!resultIterator.hasNext()) {
    // Exhausted resultIterator
    if (partitionLocationIterator.hasNext()) {
      nextLocation = partitionLocationIterator.next();
    } else {
      // Exhausted all iterators
      return null;
    }
    if (nextLocation.getKeys().isPresent()) {
      throw new IllegalStateException("Table scans should not be done with keys");
    }
    updateIterator();
  }

  returnedRows++;
  final WindowedRow row = resultIterator.next();
  return QueryRowImpl.of(row.schema(), row.key(), row.window(), row.value(), row.rowTime());
}
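WindowedTableScanOperator.next() follows a pull-based contract: it refills its per-partition result iterator as needed and returns null once every partition location has been drained (or the query has been cancelled). A hypothetical driver loop for that contract is sketched below; the RowOperator interface is a placeholder for illustration, not the actual ksql operator interface.

// A minimal sketch of driving a pull-based operator such as the one above:
// keep calling next() and treat null as end-of-data. RowOperator is a
// hypothetical stand-in, not a ksql type.
interface RowOperator {
  void open();
  Object next();   // returns null when no more rows are available
  void close();
}

final class OperatorDriver {

  static long drain(final RowOperator operator) {
    operator.open();
    try {
      long rows = 0;
      Object row;
      while ((row = operator.next()) != null) {
        // A real consumer would forward the row downstream; here we only count it.
        rows++;
      }
      return rows;
    } finally {
      operator.close();
    }
  }
}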
Use of io.confluent.ksql.execution.streams.materialization.WindowedRow in project ksql by confluentinc.
The class KeyedWindowedTableLookupOperator, method next.
@Override
public Object next() {
  while (!resultIterator.hasNext()) {
    // Exhausted resultIterator
    if (!keyIterator.hasNext()) {
      if (partitionLocationIterator.hasNext()) {
        nextLocation = partitionLocationIterator.next();
      } else {
        // Exhausted all iterators
        return null;
      }
      if (!nextLocation.getKeys().isPresent()) {
        throw new IllegalStateException("Table lookup queries should be done with keys");
      }
      keyIterator = nextLocation.getKeys().get().iterator();
    }
    nextKey = keyIterator.next();
    updateIterator(getWindowBounds(nextKey));
  }

  returnedRows++;
  final WindowedRow row = resultIterator.next();
  return QueryRowImpl.of(row.schema(), row.key(), row.window(), row.value(), row.rowTime());
}
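Both next() implementations above use the same chaining pattern: whenever the inner result iterator runs dry, the outer iterator (partition locations, and for keyed lookups the keys within a location) is advanced and the inner iterator is rebuilt. A generic, standalone sketch of that pattern is shown below; the types are placeholders, not ksql classes.

import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;

// A generic sketch of "advance the outer iterator when the inner one is exhausted".
// LOCATION and ROW are placeholders; openLocation stands in for updateIterator().
final class ChainedRowIterator<LOCATION, ROW> implements Iterator<ROW> {

  private final Iterator<LOCATION> locations;
  private final Function<LOCATION, Iterator<ROW>> openLocation;
  private Iterator<ROW> current = Collections.emptyIterator();

  ChainedRowIterator(
      final List<LOCATION> locations,
      final Function<LOCATION, Iterator<ROW>> openLocation
  ) {
    this.locations = locations.iterator();
    this.openLocation = openLocation;
  }

  @Override
  public boolean hasNext() {
    // Mirrors the while (!resultIterator.hasNext()) loop in the operators above.
    while (!current.hasNext()) {
      if (!locations.hasNext()) {
        return false;  // exhausted all locations
      }
      current = openLocation.apply(locations.next());
    }
    return true;
  }

  @Override
  public ROW next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    return current.next();
  }
}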
Use of io.confluent.ksql.execution.streams.materialization.WindowedRow in project ksql by confluentinc.
The class KsMaterializationFunctionalTest, method shouldQueryMaterializedTableForSessionWindowed.
@Test
public void shouldQueryMaterializedTableForSessionWindowed() {
  // Given:
  final PersistentQueryMetadata query = executeQuery(
      "CREATE TABLE " + output + " AS"
          + " SELECT USERID, COUNT(*) AS COUNT FROM " + USER_STREAM
          + " WINDOW SESSION (" + WINDOW_SIZE.getSeconds() + " SECONDS)"
          + " GROUP BY USERID;");

  final LogicalSchema schema = schema("COUNT", SqlTypes.BIGINT);
  final Map<Windowed<String>, GenericRow> rows =
      waitForUniqueUserRows(SESSION_WINDOWED_DESERIALIZER, schema);

  // When:
  final Materialization materialization =
      query.getMaterialization(queryId, contextStacker).get();

  // Then:
  assertThat(materialization.windowType(), is(Optional.of(WindowType.SESSION)));

  final MaterializedWindowedTable table = materialization.windowed();

  rows.forEach((k, v) -> {
    final Window w = Window.of(k.window().startTime(), k.window().endTime());
    final GenericKey key = genericKey(k.key());

    final List<WindowedRow> resultAtWindowStart = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.singleton(w.start()), Range.all())));
    assertThat("at exact window start", resultAtWindowStart, hasSize(1));
    assertThat(resultAtWindowStart.get(0).schema(), is(schema));
    assertThat(resultAtWindowStart.get(0).window(), is(Optional.of(w)));
    assertThat(resultAtWindowStart.get(0).key(), is(key));
    assertThat(resultAtWindowStart.get(0).value(), is(v));

    final List<WindowedRow> resultAtWindowEnd = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.all(), Range.singleton(w.end()))));
    assertThat("at exact window end", resultAtWindowEnd, hasSize(1));

    final List<WindowedRow> resultFromRange = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION,
            Range.closed(w.start().minusMillis(1), w.start().plusMillis(1)), Range.all())));
    assertThat("range including window start", resultFromRange, is(resultAtWindowStart));

    final List<WindowedRow> resultPast = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION,
            Range.closed(w.start().plusMillis(1), w.start().plusMillis(1)), Range.all())));
    assertThat("past start", resultPast, is(empty()));
  });
}
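The bounds the test passes to table.get are plain Guava Range values over Instant: Range.singleton pins an exact window start or end, Range.closed accepts a small interval around it, and Range.all leaves that dimension unbounded. The standalone snippet below only illustrates how contains() behaves for those bound shapes; it does not touch ksql.

import com.google.common.collect.Range;
import java.time.Instant;

public final class WindowBoundsExample {

  public static void main(final String[] args) {
    final Instant start = Instant.parse("2024-01-01T00:00:00Z");  // arbitrary example value

    // Range.singleton(start) matches only the exact window start.
    System.out.println(Range.singleton(start).contains(start));                // true
    System.out.println(Range.singleton(start).contains(start.plusMillis(1)));  // false

    // Range.closed(a, b) matches everything in [a, b], bounds included.
    final Range<Instant> aroundStart = Range.closed(start.minusMillis(1), start.plusMillis(1));
    System.out.println(aroundStart.contains(start));                           // true

    // Range.all() places no bound at all on that dimension.
    System.out.println(Range.<Instant>all().contains(start));                  // true
  }
}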
Use of io.confluent.ksql.execution.streams.materialization.WindowedRow in project ksql by confluentinc.
The class KsMaterializationFunctionalTest, method shouldQueryMaterializedTableForHoppingWindowed.
@Test
public void shouldQueryMaterializedTableForHoppingWindowed() {
  // Given:
  final PersistentQueryMetadata query = executeQuery(
      "CREATE TABLE " + output + " AS"
          + " SELECT USERID, COUNT(*) AS COUNT FROM " + USER_STREAM
          + " WINDOW HOPPING (SIZE " + WINDOW_SIZE.getSeconds() + " SECONDS,"
          + " ADVANCE BY " + WINDOW_SIZE.getSeconds() + " SECONDS)"
          + " GROUP BY USERID;");

  final LogicalSchema schema = schema("COUNT", SqlTypes.BIGINT);
  final Map<Windowed<String>, GenericRow> rows =
      waitForUniqueUserRows(TIME_WINDOWED_DESERIALIZER, schema);

  // When:
  final Materialization materialization =
      query.getMaterialization(queryId, contextStacker).get();

  // Then:
  assertThat(materialization.windowType(), is(Optional.of(WindowType.HOPPING)));

  final MaterializedWindowedTable table = materialization.windowed();

  rows.forEach((k, v) -> {
    final Window w = Window.of(k.window().startTime(), k.window().endTime());
    final GenericKey key = genericKey(k.key());

    final List<WindowedRow> resultAtWindowStart = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.singleton(w.start()), Range.all())));
    assertThat("at exact window start", resultAtWindowStart, hasSize(1));
    assertThat(resultAtWindowStart.get(0).schema(), is(schema));
    assertThat(resultAtWindowStart.get(0).window(), is(Optional.of(w)));
    assertThat(resultAtWindowStart.get(0).key(), is(key));
    assertThat(resultAtWindowStart.get(0).value(), is(v));

    final List<WindowedRow> resultAtWindowEnd = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION, Range.all(), Range.singleton(w.end()))));
    assertThat("at exact window end", resultAtWindowEnd, hasSize(1));

    final List<WindowedRow> resultFromRange = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION,
            Range.closed(w.start().minusMillis(1), w.start().plusMillis(1)), Range.all())));
    assertThat("range including window start", resultFromRange, is(resultAtWindowStart));

    final List<WindowedRow> resultPast = withRetry(() -> Lists.newArrayList(
        table.get(key, PARTITION,
            Range.closed(w.start().plusMillis(1), w.start().plusMillis(1)), Range.all())));
    assertThat("past start", resultPast, is(empty()));
  });
}
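The hopping test uses SIZE equal to ADVANCE BY, so each record lands in exactly one window and querying at the exact window start returns a single row. The arithmetic behind that is sketched below (not ksql code): hopping windows start at multiples of the advance, and a record at time t belongs to every window whose start lies in (t - size, t].

import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;

public final class HoppingWindowStarts {

  // Returns the start times of all hopping windows [start, start + size)
  // that contain the given record timestamp.
  static List<Instant> windowStartsContaining(
      final Instant recordTime,
      final Duration size,
      final Duration advance
  ) {
    final long sizeMs = size.toMillis();
    final long advanceMs = advance.toMillis();
    final long ts = recordTime.toEpochMilli();

    final List<Instant> starts = new ArrayList<>();
    // Latest window start at or before the record timestamp.
    long start = (ts / advanceMs) * advanceMs;
    // Walk backwards while the window still covers the timestamp.
    while (start >= 0 && start + sizeMs > ts) {
      starts.add(Instant.ofEpochMilli(start));
      start -= advanceMs;
    }
    return starts;
  }

  public static void main(final String[] args) {
    // size == advance == 5s: exactly one window contains the record.
    System.out.println(windowStartsContaining(
        Instant.ofEpochMilli(12_000), Duration.ofSeconds(5), Duration.ofSeconds(5)));
    // prints [1970-01-01T00:00:10Z]

    // size 10s, advance 5s: the record falls into two overlapping windows.
    System.out.println(windowStartsContaining(
        Instant.ofEpochMilli(12_000), Duration.ofSeconds(10), Duration.ofSeconds(5)));
    // prints [1970-01-01T00:00:10Z, 1970-01-01T00:00:05Z]
  }
}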