Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class CachingInMemoryKeyValueStoreTest, method setUp:
@Before
public void setUp() {
    final String storeName = "store";
    underlyingStore = new InMemoryKeyValueStore(storeName);
    cacheFlushListener = new CacheFlushListenerStub<>(new StringDeserializer(), new StringDeserializer());
    store = new CachingKeyValueStore(underlyingStore, false);
    store.setFlushListener(cacheFlushListener, false);
    cache = new ThreadCache(new LogContext("testCache "), maxCacheSizeBytes, new MockStreamsMetrics(new Metrics()));
    context = new InternalMockProcessorContext<>(null, null, null, null, cache);
    context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders()));
    store.init((StateStoreContext) context, null);
}
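MockStreamsMetrics here is just a StreamsMetricsImpl built over a throwaway Metrics registry, which is what lets ThreadCache be constructed outside a running application. A minimal sketch of a test body that could exercise this setup (it assumes the store and underlyingStore fields from setUp() above and the usual JUnit asserts; it is an illustration, not a test from the Kafka source):

@Test
public void shouldFlushDirtyEntriesToUnderlyingStore() {
    final Bytes key = Bytes.wrap("key".getBytes());
    // Writes land in the ThreadCache first; the underlying store stays empty.
    store.put(key, "value".getBytes());
    assertNull(underlyingStore.get(key));
    // Flushing evicts dirty entries into the InMemoryKeyValueStore and
    // notifies the CacheFlushListenerStub registered in setUp().
    store.flush();
    assertArrayEquals("value".getBytes(), underlyingStore.get(key));
}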
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class CachingPersistentSessionStoreTest, method before:
@Before
public void before() {
    final RocksDBSegmentedBytesStore segmented = new RocksDBSegmentedBytesStore(
        "store-name",
        "metric-scope",
        Long.MAX_VALUE,
        SEGMENT_INTERVAL,
        new SessionKeySchema()
    );
    underlyingStore = new RocksDBSessionStore(segmented);
    cachingStore = new CachingSessionStore(underlyingStore, SEGMENT_INTERVAL);
    cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    final InternalMockProcessorContext context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders()));
    cachingStore.init((StateStoreContext) context, cachingStore);
}
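For orientation, a sketch of how a test might write and read a session through this caching layer (assuming the cachingStore and DEFAULT_TIMESTAMP fields from before(); SessionWindow is org.apache.kafka.streams.kstream.internals.SessionWindow):

@Test
public void shouldReadSessionThroughCache() {
    final Bytes key = Bytes.wrap("a".getBytes());
    // A session is keyed by (key, window); this write stays in the ThreadCache.
    cachingStore.put(new Windowed<>(key, new SessionWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + 10)), "1".getBytes());
    // findSessions merges cached entries with the RocksDB-backed store.
    try (final KeyValueIterator<Windowed<Bytes>, byte[]> iterator =
             cachingStore.findSessions(key, 0, Long.MAX_VALUE)) {
        assertArrayEquals("1".getBytes(), iterator.next().value);
    }
}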
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class AbstractRocksDBSegmentedBytesStoreTest, method shouldRestoreRecordsAndConsistencyVectorSingleTopic:
@Test
public void shouldRestoreRecordsAndConsistencyVectorSingleTopic() {
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", StreamsConfig.METRICS_LATEST, new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init((StateStoreContext) context, bytesStore);
    // 0 segments initially.
    assertEquals(0, bytesStore.getSegments().size());
    bytesStore.restoreAllInternal(getChangelogRecords());
    // 2 segments are created during restoration.
    assertEquals(2, bytesStore.getSegments().size());
    final String key = "a";
    final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
    expected.add(new KeyValue<>(new Windowed<>(key, windows[0]), 50L));
    expected.add(new KeyValue<>(new Windowed<>(key, windows[2]), 100L));
    expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L));
    final List<KeyValue<Windowed<String>, Long>> results = toList(bytesStore.all());
    assertEquals(expected, results);
    assertThat(bytesStore.getPosition(), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions(""), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions(""), hasEntry(0, 3L));
}
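The last three assertions inspect the "consistency vector", which is an org.apache.kafka.streams.query.Position keyed by topic and partition. A sketch of the Position the restore is expected to produce (topic "" is presumably the topic of the records returned by getChangelogRecords(), matching the assertions above):

final Position expected = Position.emptyPosition().withComponent("", 0, 3L);
// getPartitionPositions returns a Map<Integer, Long> of partition -> offset.
assertEquals(3L, (long) expected.getPartitionPositions("").get(0));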
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class AbstractRocksDBSegmentedBytesStoreTest, method shouldHandleTombstoneRecords:
@Test
public void shouldHandleTombstoneRecords() {
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", StreamsConfig.METRICS_LATEST, new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init((StateStoreContext) context, bytesStore);
    // 0 segments initially.
    assertEquals(0, bytesStore.getSegments().size());
    bytesStore.restoreAllInternal(getChangelogRecordsWithTombstones());
    // 1 segment is created during restoration.
    assertEquals(1, bytesStore.getSegments().size());
    final String key = "a";
    final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
    expected.add(new KeyValue<>(new Windowed<>(key, windows[0]), 50L));
    final List<KeyValue<Windowed<String>, Long>> results = toList(bytesStore.all());
    assertEquals(expected, results);
    assertThat(bytesStore.getPosition(), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("A"), hasEntry(0, 2L));
}
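A tombstone in a changelog is simply a record with a null value; during restoration the store deletes the key rather than writing it. A hypothetical record of the shape getChangelogRecordsWithTombstones() presumably produces (the key bytes are a placeholder for illustration, not the real windowed-key encoding):

final byte[] placeholderKey = new byte[] {0, 0, 0, 1}; // not the actual key schema
final ConsumerRecord<byte[], byte[]> tombstone =
    new ConsumerRecord<>("A", 0, 1L, placeholderKey, null); // null value marks a delete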
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class MergedSortedCacheKeyValueBytesStoreIteratorTest, method shouldPeekNextKey:
@Test
public void shouldPeekNextKey() {
    final KeyValueStore<Bytes, byte[]> kv = new InMemoryKeyValueStore("one");
    final ThreadCache cache = new ThreadCache(new LogContext("testCache "), 1000000L, new MockStreamsMetrics(new Metrics()));
    final byte[][] bytes = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 6 }, { 7 }, { 8 }, { 9 }, { 10 } };
    // Even-indexed entries go to the underlying store, odd-indexed entries to the cache.
    for (int i = 0; i < bytes.length - 1; i += 2) {
        kv.put(Bytes.wrap(bytes[i]), bytes[i]);
        cache.put(namespace, Bytes.wrap(bytes[i + 1]), new LRUCacheEntry(bytes[i + 1]));
    }
    final Bytes from = Bytes.wrap(new byte[] { 2 });
    final Bytes to = Bytes.wrap(new byte[] { 9 });
    final KeyValueIterator<Bytes, byte[]> storeIterator = kv.range(from, to);
    final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.range(namespace, from, to);
    final MergedSortedCacheKeyValueBytesStoreIterator iterator = new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator, true);
    final byte[][] values = new byte[8][];
    int index = 0;
    int bytesIndex = 2;
    while (iterator.hasNext()) {
        final byte[] key = iterator.peekNextKey().get();
        values[index++] = key;
        assertArrayEquals(bytes[bytesIndex++], key);
        iterator.next();
    }
    iterator.close();
}
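Across all of these snippets the recurring pattern is new ThreadCache(new LogContext(...), sizeBytes, new MockStreamsMetrics(new Metrics())). A minimal sketch of that pattern on its own, written as a test in the same package as the stores above (the namespace string is an arbitrary illustration; in Kafka Streams it identifies a task/store pair):

@Test
public void shouldCacheAndRetrieveEntry() {
    final ThreadCache cache = new ThreadCache(
        new LogContext("example "), 1024 * 1024L, new MockStreamsMetrics(new Metrics()));
    final String namespace = "0_0-store"; // hypothetical task/store namespace
    cache.put(namespace, Bytes.wrap(new byte[] { 1 }), new LRUCacheEntry(new byte[] { 42 }));
    assertArrayEquals(new byte[] { 42 }, cache.get(namespace, Bytes.wrap(new byte[] { 1 })).value());
}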