Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
The class CachingSessionStoreTest, method shouldFetchAllSessionsWithSameRecordKey.
@Test
public void shouldFetchAllSessionsWithSameRecordKey() {
    final List<KeyValue<Windowed<Bytes>, byte[]>> expected = Arrays.asList(
        KeyValue.pair(new Windowed<>(keyA, new SessionWindow(0, 0)), "1".getBytes()),
        KeyValue.pair(new Windowed<>(keyA, new SessionWindow(10, 10)), "2".getBytes()),
        KeyValue.pair(new Windowed<>(keyA, new SessionWindow(100, 100)), "3".getBytes()),
        KeyValue.pair(new Windowed<>(keyA, new SessionWindow(1000, 1000)), "4".getBytes()));
    for (KeyValue<Windowed<Bytes>, byte[]> kv : expected) {
        cachingStore.put(kv.key, kv.value);
    }
    // add one that shouldn't appear in the results
    cachingStore.put(new Windowed<>(keyAA, new SessionWindow(0, 0)), "5".getBytes());
    final List<KeyValue<Windowed<Bytes>, byte[]>> results = toList(cachingStore.fetch(keyA));
    verifyKeyValueList(expected, results);
}
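The test relies on toList and verifyKeyValueList helpers defined elsewhere in the test class. The sketch below shows one plausible shape for them, assuming JUnit 4 and the Kafka Streams KeyValue type; the bodies are illustrative assumptions, not the project's exact code. Note that the byte[] values need assertArrayEquals, since assertEquals would only compare array references.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.kafka.streams.KeyValue;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;

final class KeyValueTestHelpers {

    // Drain an iterator of KeyValue pairs into a list so it can be asserted on.
    static <K, V> List<KeyValue<K, V>> toList(final Iterator<KeyValue<K, V>> iterator) {
        final List<KeyValue<K, V>> results = new ArrayList<>();
        while (iterator.hasNext()) {
            results.add(iterator.next());
        }
        return results;
    }

    // Compare expected and actual pairs in order; values are byte arrays, so
    // assertArrayEquals is used for content equality.
    static <K> void verifyKeyValueList(final List<KeyValue<K, byte[]>> expected,
                                       final List<KeyValue<K, byte[]>> actual) {
        assertEquals(expected.size(), actual.size());
        for (int i = 0; i < expected.size(); i++) {
            assertEquals(expected.get(i).key, actual.get(i).key);
            assertArrayEquals(expected.get(i).value, actual.get(i).value);
        }
    }
}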
Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
The class CompositeReadOnlyWindowStoreTest, method shouldFetchKeyRangeAcrossStores.
@Test
public void shouldFetchKeyRangeAcrossStores() {
    final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
    stubProviderTwo.addStore(storeName, secondUnderlying);
    underlyingWindowStore.put("a", "a", 0L);
    secondUnderlying.put("b", "b", 10L);
    List<KeyValue<Windowed<String>, String>> results = StreamsTestUtils.toList(windowStore.fetch("a", "b", 0, 10));
    assertThat(results, equalTo(Arrays.asList(
        KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"),
        KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b"))));
}
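Windowed keys compare on both the record key and the window bounds, which is what lets the equalTo(...) assertion above pin down the exact window boundaries. A minimal, self-contained illustration follows; the test class name, method name, and WINDOW_SIZE value are illustrative stand-ins, not taken from the project.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;

import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.internals.TimeWindow;
import org.junit.Test;

public class WindowedEqualityIllustrationTest {

    private static final long WINDOW_SIZE = 30_000L; // illustrative value only

    @Test
    public void windowedKeysCompareOnKeyAndWindowBounds() {
        final Windowed<String> windowed = new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE));
        // Same key and same window bounds: equal.
        assertEquals(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), windowed);
        // Same key but a different window: not equal.
        assertNotEquals(new Windowed<>("a", new TimeWindow(10, 10 + WINDOW_SIZE)), windowed);
        // Different key in the same window: not equal.
        assertNotEquals(new Windowed<>("b", new TimeWindow(0, WINDOW_SIZE)), windowed);
    }
}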
Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
The class CompositeReadOnlyWindowStoreTest, method shouldGetAllAcrossStores.
@Test
public void shouldGetAllAcrossStores() {
    final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
    stubProviderTwo.addStore(storeName, secondUnderlying);
    underlyingWindowStore.put("a", "a", 0L);
    secondUnderlying.put("b", "b", 10L);
    List<KeyValue<Windowed<String>, String>> results = StreamsTestUtils.toList(windowStore.all());
    assertThat(results, equalTo(Arrays.asList(
        KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"),
        KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b"))));
}
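Where all() walks every key and every window across all underlying stores, the ranged fetch used in the previous test restricts both the key range and the time range. As a hedged follow-up sketch (not part of the project's test, and assuming the stub filters on window start time the way the real window stores do), querying the same fixture with a narrower time range would return only the record whose window starts inside it:

// Continues the test fixture above; only the window starting at 0 falls in [0, 5].
final List<KeyValue<Windowed<String>, String>> firstWindowOnly =
        StreamsTestUtils.toList(windowStore.fetch("a", "b", 0, 5));
assertThat(firstWindowOnly, equalTo(Collections.singletonList(
        KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"))));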
Use of org.apache.kafka.streams.KeyValue in project apache-kafka-on-k8s by banzaicloud.
The class ProcessorStateManager, method updateStandbyStates.
List<ConsumerRecord<byte[], byte[]>> updateStandbyStates(final TopicPartition storePartition,
                                                         final List<ConsumerRecord<byte[], byte[]>> records) {
    final long limit = offsetLimit(storePartition);
    List<ConsumerRecord<byte[], byte[]>> remainingRecords = null;
    final List<KeyValue<byte[], byte[]>> restoreRecords = new ArrayList<>();
    // restore states from changelog records
    final BatchingStateRestoreCallback restoreCallback = getBatchingRestoreCallback(restoreCallbacks.get(storePartition.topic()));
    long lastOffset = -1L;
    int count = 0;
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        if (record.offset() < limit) {
            restoreRecords.add(KeyValue.pair(record.key(), record.value()));
            lastOffset = record.offset();
        } else {
            if (remainingRecords == null) {
                remainingRecords = new ArrayList<>(records.size() - count);
            }
            remainingRecords.add(record);
        }
        count++;
    }
    if (!restoreRecords.isEmpty()) {
        try {
            restoreCallback.restoreAll(restoreRecords);
        } catch (final Exception e) {
            throw new ProcessorStateException(
                String.format("%sException caught while trying to restore state from %s", logPrefix, storePartition), e);
        }
    }
    // record the restored offset for its change log partition
    restoredOffsets.put(storePartition, lastOffset + 1);
    return remainingRecords;
}
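The records below the offset limit are converted into KeyValue pairs and handed to a BatchingStateRestoreCallback in one restoreAll(...) call. A minimal sketch of such a callback is shown below; it is an illustration rather than code from this project, and it simply replays each changelog pair into an in-memory map.

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.processor.BatchingStateRestoreCallback;

final class MapRestoreCallback implements BatchingStateRestoreCallback {

    private final Map<Bytes, byte[]> state = new HashMap<>();

    @Override
    public void restoreAll(final Collection<KeyValue<byte[], byte[]>> records) {
        // Apply a whole batch of changelog records in order.
        for (final KeyValue<byte[], byte[]> record : records) {
            restore(record.key, record.value);
        }
    }

    @Override
    public void restore(final byte[] key, final byte[] value) {
        if (value == null) {
            state.remove(Bytes.wrap(key));   // a null value is a delete (tombstone)
        } else {
            state.put(Bytes.wrap(key), value);
        }
    }
}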
Use of org.apache.kafka.streams.KeyValue in project incubator-rya by apache.
The class KeyValueJoinStateStore, method store.
@Override
public void store(final BinaryResult result) {
    requireNonNull(result);
    // The join key prefix is an ordered list of values from the binding set that match the join variables.
    // This is a prefix for every row that holds values for a specific set of join variable values.
    final Side side = result.getSide();
    final VisibilityBindingSet bs = result.getResult();
    final String joinKeyPrefix = makeCommaDelimitedValues(side, joinVars, bs);
    final List<KeyValue<String, VisibilityBindingSet>> values = new ArrayList<>();
    // For each join variable set, we need a start key for scanning,
    final String startKey = joinKeyPrefix + START_RANGE_SUFFIX;
    values.add(new KeyValue<>(startKey, RANGE_MARKER_VALUE));
    // The actual value that was emitted as a result.
    final String valueKey = makeCommaDelimitedValues(side, allVars, bs);
    values.add(new KeyValue<>(valueKey, bs));
    // And the end key for scanning.
    final String endKey = joinKeyPrefix + END_RANGE_SUFFIX;
    values.add(new KeyValue<>(endKey, RANGE_MARKER_VALUE));
    // Write the pairs to the store.
    log.debug("\nStoring the following values: {}\n", values);
    store.putAll(values);
}
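The start and end marker rows bound a contiguous key range, so a single range scan over the underlying Kafka Streams KeyValueStore returns every binding set stored under the same join key prefix. The sketch below shows what that read side might look like; the method name and shape are assumptions for illustration, the real lookup lives elsewhere in KeyValueJoinStateStore, and the iterator and store types come from org.apache.kafka.streams.state.

// A hedged sketch of the read side enabled by the range markers written above.
static List<VisibilityBindingSet> readJoinedValues(
        final KeyValueStore<String, VisibilityBindingSet> store,
        final String joinKeyPrefix) {
    final List<VisibilityBindingSet> matches = new ArrayList<>();
    try (final KeyValueIterator<String, VisibilityBindingSet> it =
                 store.range(joinKeyPrefix + START_RANGE_SUFFIX, joinKeyPrefix + END_RANGE_SUFFIX)) {
        while (it.hasNext()) {
            final KeyValue<String, VisibilityBindingSet> entry = it.next();
            // Skip the marker rows themselves; they exist only to bound the scan.
            if (!RANGE_MARKER_VALUE.equals(entry.value)) {
                matches.add(entry.value);
            }
        }
    }
    return matches;
}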