Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.
The class GlobalKTableIntegrationTest, method shouldRestoreGlobalInMemoryKTableOnRestart:
@Test
public void shouldRestoreGlobalInMemoryKTableOnRestart() throws Exception {
    builder = new StreamsBuilder();
    globalTable = builder.globalTable(
        globalTableTopic,
        Consumed.with(Serdes.Long(), Serdes.String()),
        Materialized.as(Stores.inMemoryKeyValueStore(globalStore)));
    produceInitialGlobalTableValues();
    startStreams();

    // The global store is queryable both as a plain key-value store and as a timestamped view.
    ReadOnlyKeyValueStore<Long, String> store =
        IntegrationTestUtils.getStore(globalStore, kafkaStreams, QueryableStoreTypes.keyValueStore());
    assertNotNull(store);
    assertThat(store.approximateNumEntries(), equalTo(4L));
    ReadOnlyKeyValueStore<Long, ValueAndTimestamp<String>> timestampedStore =
        IntegrationTestUtils.getStore(globalStore, kafkaStreams, QueryableStoreTypes.timestampedKeyValueStore());
    assertNotNull(timestampedStore);
    assertThat(timestampedStore.approximateNumEntries(), equalTo(4L));

    // Restart the application and verify the in-memory global table is restored from the topic.
    kafkaStreams.close();
    startStreams();
    store = IntegrationTestUtils.getStore(globalStore, kafkaStreams, QueryableStoreTypes.keyValueStore());
    assertThat(store.approximateNumEntries(), equalTo(4L));
    timestampedStore =
        IntegrationTestUtils.getStore(globalStore, kafkaStreams, QueryableStoreTypes.timestampedKeyValueStore());
    assertThat(timestampedStore.approximateNumEntries(), equalTo(4L));
}
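Once the timestamped view is obtained, each entry is a ValueAndTimestamp carrying both the record value and its timestamp. A minimal sketch of reading a single entry (the key 1L is an assumed example, not taken from the test data above):

// Illustrative lookup against a timestamped key-value store; key 1L is a hypothetical example key.
final ValueAndTimestamp<String> entry = timestampedStore.get(1L);
if (entry != null) {
    final String value = entry.value();       // the stored value
    final long timestamp = entry.timestamp(); // the timestamp of the record that produced it
    System.out.println(value + " @ " + timestamp);
}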
Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.
The class IQv2IntegrationTest, method shouldRejectNonRunningActive:
@Test
public void shouldRejectNonRunningActive() throws NoSuchFieldException, IllegalAccessException {
    final KeyQuery<Integer, ValueAndTimestamp<Integer>> query = KeyQuery.withKey(1);
    final StateQueryRequest<ValueAndTimestamp<Integer>> request =
        inStore(STORE_NAME).withQuery(query).requireActive();
    final Set<Integer> partitions = mkSet(0, 1);

    kafkaStreams.start();

    // Reach into the KafkaStreams instance to get hold of the first stream thread and its state lock.
    final Field threadsField = KafkaStreams.class.getDeclaredField("threads");
    threadsField.setAccessible(true);
    @SuppressWarnings("unchecked")
    final List<StreamThread> threads = (List<StreamThread>) threadsField.get(kafkaStreams);
    final StreamThread streamThread = threads.get(0);
    final Field stateLock = StreamThread.class.getDeclaredField("stateLock");
    stateLock.setAccessible(true);
    final Object lock = stateLock.get(streamThread);

    // wait for the desired partitions to be assigned
    IntegrationTestUtils.iqv2WaitForPartitions(kafkaStreams, inStore(STORE_NAME).withQuery(query), partitions);

    // then lock the thread state, change it, and make our assertions.
    synchronized (lock) {
        final Field stateField = StreamThread.class.getDeclaredField("state");
        stateField.setAccessible(true);
        stateField.set(streamThread, State.PARTITIONS_ASSIGNED);

        final StateQueryResult<ValueAndTimestamp<Integer>> result =
            IntegrationTestUtils.iqv2WaitForPartitions(kafkaStreams, request, partitions);
        assertThat(result.getPartitionResults().keySet(), is(partitions));
        for (final Integer partition : partitions) {
            assertThat(result.getPartitionResults().get(partition).isFailure(), is(true));
            assertThat(result.getPartitionResults().get(partition).getFailureReason(), is(FailureReason.NOT_ACTIVE));
            assertThat(
                result.getPartitionResults().get(partition).getFailureMessage(),
                is("Query requires a running active task,"
                    + " but partition was in state PARTITIONS_ASSIGNED and was active."));
        }
    }
}
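For contrast, the ordinary IQv2 path (without the reflection used above to force a non-running state) issues the same KeyQuery against a running instance and unwraps the ValueAndTimestamp on success. A minimal sketch, reusing the kafkaStreams instance and STORE_NAME from the test for illustration:

// Happy-path IQv2 lookup sketch; key 1 and the printed output are illustrative only.
final KeyQuery<Integer, ValueAndTimestamp<Integer>> keyQuery = KeyQuery.withKey(1);
final StateQueryRequest<ValueAndTimestamp<Integer>> req = inStore(STORE_NAME).withQuery(keyQuery);
final StateQueryResult<ValueAndTimestamp<Integer>> res = kafkaStreams.query(req);
final QueryResult<ValueAndTimestamp<Integer>> partitionResult = res.getOnlyPartitionResult();
if (partitionResult != null && partitionResult.isSuccess()) {
    final ValueAndTimestamp<Integer> vt = partitionResult.getResult();
    System.out.println(vt.value() + " @ " + vt.timestamp());
} else if (partitionResult != null) {
    System.out.println(partitionResult.getFailureReason() + ": " + partitionResult.getFailureMessage());
}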
Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.
The class IQv2IntegrationTest, method shouldNotRequireQueryHandler:
@Test
public void shouldNotRequireQueryHandler() {
    final KeyQuery<Integer, ValueAndTimestamp<Integer>> query = KeyQuery.withKey(1);
    final int partition = 1;
    final Set<Integer> partitions = singleton(partition);
    final StateQueryRequest<ValueAndTimestamp<Integer>> request =
        inStore(STORE_NAME).withQuery(query).withPartitions(partitions);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
        INPUT_TOPIC_NAME,
        Consumed.with(Serdes.Integer(), Serdes.Integer()),
        Materialized.as(new KeyValueBytesStoreSupplier() {

            @Override
            public String name() {
                return STORE_NAME;
            }

            @Override
            public KeyValueStore<Bytes, byte[]> get() {
                return new KeyValueStore<Bytes, byte[]>() {

                    private boolean open = false;
                    private Map<Bytes, byte[]> map = new HashMap<>();
                    private Position position;
                    private StateStoreContext context;

                    @Override
                    public void put(final Bytes key, final byte[] value) {
                        map.put(key, value);
                        StoreQueryUtils.updatePosition(position, context);
                    }

                    @Override
                    public byte[] putIfAbsent(final Bytes key, final byte[] value) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.putIfAbsent(key, value);
                    }

                    @Override
                    public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
                        StoreQueryUtils.updatePosition(position, context);
                        for (final KeyValue<Bytes, byte[]> entry : entries) {
                            map.put(entry.key, entry.value);
                        }
                    }

                    @Override
                    public byte[] delete(final Bytes key) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.remove(key);
                    }

                    @Override
                    public String name() {
                        return STORE_NAME;
                    }

                    @Deprecated
                    @Override
                    public void init(final ProcessorContext context, final StateStore root) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public void init(final StateStoreContext context, final StateStore root) {
                        context.register(root, (key, value) -> put(Bytes.wrap(key), value));
                        this.open = true;
                        this.position = Position.emptyPosition();
                        this.context = context;
                    }

                    @Override
                    public void flush() {
                    }

                    @Override
                    public void close() {
                        this.open = false;
                        map.clear();
                    }

                    @Override
                    public boolean persistent() {
                        return false;
                    }

                    @Override
                    public boolean isOpen() {
                        return open;
                    }

                    @Override
                    public Position getPosition() {
                        return position;
                    }

                    @Override
                    public byte[] get(final Bytes key) {
                        return map.get(key);
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> all() {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public long approximateNumEntries() {
                        return map.size();
                    }
                };
            }

            @Override
            public String metricsScope() {
                return "nonquery";
            }
        }));
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration());
    kafkaStreams.cleanUp();
    kafkaStreams.start();

    final StateQueryResult<ValueAndTimestamp<Integer>> result =
        IntegrationTestUtils.iqv2WaitForResult(kafkaStreams, request);
    final QueryResult<ValueAndTimestamp<Integer>> queryResult = result.getPartitionResults().get(partition);
    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.UNKNOWN_QUERY_TYPE));
    assertThat(
        queryResult.getFailureMessage(),
        matchesPattern(
            "This store (.*) doesn't know how to execute the given query (.*)."
                + " Contact the store maintainer if you need support for a new query type."));
}
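When a custom store does not handle IQv2 queries, callers can detect the UNKNOWN_QUERY_TYPE failure and fall back to the classic interactive-query API, which only needs the store to be registered under a queryable type. A hedged sketch of such a fallback, reusing queryResult, kafkaStreams, and STORE_NAME from the test above for illustration (the fallback itself is not part of the test):

// Illustrative fallback: if the IQv2 query is not supported, read through the legacy IQ facade instead.
if (queryResult.isFailure() && queryResult.getFailureReason() == FailureReason.UNKNOWN_QUERY_TYPE) {
    final ReadOnlyKeyValueStore<Integer, Integer> legacyView =
        kafkaStreams.store(StoreQueryParameters.fromNameAndType(STORE_NAME, QueryableStoreTypes.keyValueStore()));
    final Integer value = legacyView.get(1); // same key as the KeyQuery above
    System.out.println("fallback value: " + value);
}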
Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.
The class KStreamSlidingWindowAggregateTest, method testEarlyRecordsSmallInput:
@Test
public void testEarlyRecordsSmallInput() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic = "topic";
    final KTable<Windowed<String>, String> table2 = builder
        .stream(topic, Consumed.with(Serdes.String(), Serdes.String()))
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(50), ofMillis(200)))
        .aggregate(
            MockInitializer.STRING_INIT,
            MockAggregator.TOSTRING_ADDER,
            Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic-Canonized").withValueSerde(Serdes.String()));
    final MockApiProcessorSupplier<Windowed<String>, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
    table2.toStream().process(supplier);

    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> inputTopic =
            driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
        inputTopic.pipeInput("A", "1", 0L);
        inputTopic.pipeInput("A", "2", 5L);
        inputTopic.pipeInput("A", "3", 6L);
        inputTopic.pipeInput("A", "4", 3L);
        inputTopic.pipeInput("A", "5", 13L);
        inputTopic.pipeInput("A", "6", 10L);
    }

    // Collapse the emitted updates down to the latest result per window start.
    final Map<Long, ValueAndTimestamp<String>> actual = new HashMap<>();
    for (final KeyValueTimestamp<Windowed<String>, String> entry : supplier.theCapturedProcessor().processed()) {
        final Windowed<String> window = entry.key();
        final Long start = window.window().start();
        final ValueAndTimestamp<String> valueAndTimestamp = ValueAndTimestamp.make(entry.value(), entry.timestamp());
        if (actual.putIfAbsent(start, valueAndTimestamp) != null) {
            actual.replace(start, valueAndTimestamp);
        }
    }

    final Map<Long, ValueAndTimestamp<String>> expected = new HashMap<>();
    expected.put(0L, ValueAndTimestamp.make("0+1+2+3+4+5+6", 13L));
    expected.put(1L, ValueAndTimestamp.make("0+2+3+4+5+6", 13L));
    expected.put(4L, ValueAndTimestamp.make("0+2+3+5+6", 13L));
    expected.put(6L, ValueAndTimestamp.make("0+3+5+6", 13L));
    expected.put(7L, ValueAndTimestamp.make("0+5+6", 13L));
    expected.put(11L, ValueAndTimestamp.make("0+5", 13L));
    assertEquals(expected, actual);
}
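The expected map pairs each window start with the final aggregate and its timestamp via ValueAndTimestamp.make, and equality covers both fields, so a single assertEquals on the maps checks values and timestamps at once. A small illustrative sketch of that equality behavior (values are made up, not from the test data):

// ValueAndTimestamp equality considers both the wrapped value and the timestamp.
final ValueAndTimestamp<String> a = ValueAndTimestamp.make("0+1+2", 13L);
final ValueAndTimestamp<String> b = ValueAndTimestamp.make("0+1+2", 13L);
final ValueAndTimestamp<String> c = ValueAndTimestamp.make("0+1+2", 10L);
assertEquals(a, b);    // same value, same timestamp -> equal
assertNotEquals(a, c); // same value, different timestamp -> not equal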
Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.
The class KStreamSlidingWindowAggregateTest, method testEarlyRecordsRepeatedInput:
@Test
public void testEarlyRecordsRepeatedInput() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic = "topic";
    final KTable<Windowed<String>, String> table2 = builder
        .stream(topic, Consumed.with(Serdes.String(), Serdes.String()))
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(5), ofMillis(20)))
        .aggregate(
            MockInitializer.STRING_INIT,
            MockAggregator.TOSTRING_ADDER,
            Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic-Canonized").withValueSerde(Serdes.String()));
    final MockApiProcessorSupplier<Windowed<String>, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
    table2.toStream().process(supplier);

    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> inputTopic =
            driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
        inputTopic.pipeInput("A", "1", 0L);
        inputTopic.pipeInput("A", "2", 2L);
        inputTopic.pipeInput("A", "3", 4L);
        inputTopic.pipeInput("A", "4", 0L);
        inputTopic.pipeInput("A", "5", 2L);
        inputTopic.pipeInput("A", "6", 2L);
        inputTopic.pipeInput("A", "7", 0L);
    }

    // Collapse the emitted updates down to the latest result per window start.
    final Map<Long, ValueAndTimestamp<String>> actual = new HashMap<>();
    for (final KeyValueTimestamp<Windowed<String>, String> entry : supplier.theCapturedProcessor().processed()) {
        final Windowed<String> window = entry.key();
        final Long start = window.window().start();
        final ValueAndTimestamp<String> valueAndTimestamp = ValueAndTimestamp.make(entry.value(), entry.timestamp());
        if (actual.putIfAbsent(start, valueAndTimestamp) != null) {
            actual.replace(start, valueAndTimestamp);
        }
    }

    final Map<Long, ValueAndTimestamp<String>> expected = new HashMap<>();
    expected.put(0L, ValueAndTimestamp.make("0+1+2+3+4+5+6+7", 4L));
    expected.put(1L, ValueAndTimestamp.make("0+2+3+5+6", 4L));
    expected.put(3L, ValueAndTimestamp.make("0+3", 4L));
    assertEquals(expected, actual);
}
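Besides capturing downstream emits with a processor supplier, the aggregate can also be read back through the driver's timestamped window store, where every windowed value surfaces as a ValueAndTimestamp. A hedged sketch that would sit inside the try-with-resources block above (store name and key reused for illustration, time range chosen arbitrarily, java.time.Instant assumed imported):

// Illustrative read-back through the driver's timestamped window store.
final WindowStore<String, ValueAndTimestamp<String>> windowStore =
    driver.getTimestampedWindowStore("topic-Canonized");
try (final WindowStoreIterator<ValueAndTimestamp<String>> iter =
         windowStore.fetch("A", Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L))) {
    while (iter.hasNext()) {
        final KeyValue<Long, ValueAndTimestamp<String>> next = iter.next(); // key is the window start
        System.out.println(next.key + " -> " + next.value.value() + " @ " + next.value.timestamp());
    }
}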