Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class AbstractRocksDBSegmentedBytesStoreTest, method shouldRestoreRecordsAndConsistencyVectorMultipleTopics:
@Test
public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics() {
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", StreamsConfig.METRICS_LATEST, new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init((StateStoreContext) context, bytesStore);
    // 0 segments initially.
    assertEquals(0, bytesStore.getSegments().size());
    bytesStore.restoreAllInternal(getChangelogRecordsMultipleTopics());
    // 2 segments are created during restoration.
    assertEquals(2, bytesStore.getSegments().size());
    final String key = "a";
    final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
    expected.add(new KeyValue<>(new Windowed<>(key, windows[0]), 50L));
    expected.add(new KeyValue<>(new Windowed<>(key, windows[2]), 100L));
    expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L));
    final List<KeyValue<Windowed<String>, Long>> results = toList(bytesStore.all());
    assertEquals(expected, results);
    assertThat(bytesStore.getPosition(), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("A"), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("A"), hasEntry(0, 3L));
    assertThat(bytesStore.getPosition().getPartitionPositions("B"), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("B"), hasEntry(0, 2L));
}
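The position assertions above check each topic's component map separately. A minimal hedged sketch of an equivalent check that builds the full expected vector with org.apache.kafka.streams.query.Position (the variable name expectedPosition is illustrative, not from the original test):

// Sketch only: compare the restored position against an explicitly built vector.
final Position expectedPosition = Position.emptyPosition()
        .withComponent("A", 0, 3L)
        .withComponent("B", 0, 2L);
assertThat(bytesStore.getPosition(), is(expectedPosition));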
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class AbstractRocksDBSegmentedBytesStoreTest, method shouldNotThrowWhenRestoringOnMissingHeaders:
@Test
public void shouldNotThrowWhenRestoringOnMissingHeaders() {
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", StreamsConfig.METRICS_LATEST, new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init((StateStoreContext) context, bytesStore);
    bytesStore.restoreAllInternal(getChangelogRecordsWithoutHeaders());
    assertThat(bytesStore.getPosition(), is(Position.emptyPosition()));
}
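For context, restoreAllInternal consumes plain consumer records. A minimal hedged sketch of a header-less changelog record of the kind getChangelogRecordsWithoutHeaders() presumably produces (topic name, offset, and the raw key/value bytes here are illustrative and not taken from the original test):

// Sketch only: a ConsumerRecord built without headers carries no position
// information, so restoring it leaves the store's position empty.
final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
records.add(new ConsumerRecord<>("changelog-topic", 0, 0L, "a".getBytes(), "50".getBytes()));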
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class AbstractWindowBytesStoreTest, method setup:
@Before
public void setup() {
    windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    recordCollector = new MockRecordCollector();
    context = new InternalMockProcessorContext<>(
        baseDir,
        Serdes.String(),
        Serdes.Integer(),
        recordCollector,
        new ThreadCache(new LogContext("testCache"), 0, new MockStreamsMetrics(new Metrics()))
    );
    context.setTime(1L);
    windowStore.init((StateStoreContext) context, windowStore);
}
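With the store initialised against this mock context, it can be exercised directly. A minimal hedged sketch (the key 1, the value "one", and the window start timestamp 1L are illustrative, not from the original class):

// Sketch only: write into the window starting at timestamp 1 and read it back.
windowStore.put(1, "one", 1L);
assertEquals("one", windowStore.fetch(1, 1L));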
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class CachingInMemorySessionStoreTest, method before:
@Before
public void before() {
    underlyingStore = new InMemorySessionStore("store-name", Long.MAX_VALUE, "metric-scope");
    cachingStore = new CachingSessionStore(underlyingStore, SEGMENT_INTERVAL);
    cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders()));
    cachingStore.init((StateStoreContext) context, cachingStore);
}
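Once wired up like this, writes go through the ThreadCache before reaching the underlying InMemorySessionStore. A minimal hedged sketch (the session key and value bytes are illustrative):

// Sketch only: the put is buffered in the cache; flush() pushes it down
// to the underlying in-memory session store.
final Bytes key = Bytes.wrap("a".getBytes());
cachingStore.put(new Windowed<>(key, new SessionWindow(0, 0)), "1".getBytes());
cachingStore.flush();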
Use of org.apache.kafka.streams.processor.internals.MockStreamsMetrics in project kafka by apache.
From the class CachingPersistentWindowStoreTest, method setUp:
@Before
public void setUp() {
    keySchema = new WindowKeySchema();
    bytesStore = new RocksDBSegmentedBytesStore("test", "metrics-scope", 0, SEGMENT_INTERVAL, keySchema);
    underlyingStore = new RocksDBWindowStore(bytesStore, false, WINDOW_SIZE);
    final TimeWindowedDeserializer<String> keyDeserializer = new TimeWindowedDeserializer<>(new StringDeserializer(), WINDOW_SIZE);
    keyDeserializer.setIsChangelogTopic(true);
    cacheListener = new CacheFlushListenerStub<>(keyDeserializer, new StringDeserializer());
    cachingStore = new CachingWindowStore(underlyingStore, WINDOW_SIZE, SEGMENT_INTERVAL);
    cachingStore.setFlushListener(cacheListener, false);
    cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders()));
    cachingStore.init((StateStoreContext) context, cachingStore);
}
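The caching window store follows the same pattern. A minimal hedged sketch of exercising it after setUp() (the key and value bytes are illustrative):

// Sketch only: the put is cached first; flush() forwards it to the RocksDB-backed
// store and notifies the registered cache flush listener.
cachingStore.put(Bytes.wrap("a".getBytes()), "a".getBytes(), DEFAULT_TIMESTAMP);
cachingStore.flush();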