Use of org.apache.kafka.test.InternalMockProcessorContext in project kafka by apache.
From the class RecordQueueTest, method shouldDropOnNegativeTimestamp.
@Test
public void shouldDropOnNegativeTimestamp() {
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME,
            0, 0, recordKey, recordValue, new RecordHeaders(), Optional.empty()));
    final RecordQueue queue = new RecordQueue(
        new TopicPartition("topic", 1),
        mockSourceNodeWithMetrics,
        new LogAndSkipOnInvalidTimestamp(),
        new LogAndContinueExceptionHandler(),
        new InternalMockProcessorContext(),
        new LogContext());
    queue.addRawRecords(records);
    assertEquals(0, queue.size());
}
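A companion sketch (not part of the original test) makes the contrast explicit: LogAndSkipOnInvalidTimestamp only drops records whose timestamp is invalid, so feeding the same queue a record with a non-negative timestamp should leave it buffered. This reuses the queue, recordKey, and recordValue fixtures from the test above, with a hypothetical offset and timestamp.

    // Hypothetical follow-up, reusing the fixtures above: a record with a
    // valid (non-negative) timestamp is retained rather than dropped.
    final List<ConsumerRecord<byte[], byte[]>> valid = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 2, 1L, TimestampType.CREATE_TIME,
            0, 0, recordKey, recordValue, new RecordHeaders(), Optional.empty()));
    queue.addRawRecords(valid);
    assertEquals(1, queue.size());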
Use of org.apache.kafka.test.InternalMockProcessorContext in project kafka by apache.
From the class GlobalStateManagerImplTest, method before.
@Before
public void before() {
    final Map<String, String> storeToTopic = new HashMap<>();
    storeToTopic.put(storeName1, t1.topic());
    storeToTopic.put(storeName2, t2.topic());
    storeToTopic.put(storeName3, t3.topic());
    storeToTopic.put(storeName4, t4.topic());
    store1 = new NoOpReadOnlyStore<>(storeName1, true);
    store2 = new ConverterStore<>(storeName2, true);
    store3 = new NoOpReadOnlyStore<>(storeName3);
    store4 = new NoOpReadOnlyStore<>(storeName4);
    topology = withGlobalStores(asList(store1, store2, store3, store4), storeToTopic);
    streamsConfig = new StreamsConfig(new Properties() {
        {
            put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
            put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
            put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
        }
    });
    stateDirectory = new StateDirectory(streamsConfig, time, true, false);
    consumer = new MockConsumer<>(OffsetResetStrategy.NONE);
    stateManager = new GlobalStateManagerImpl(
        new LogContext("test"), time, topology, consumer, stateDirectory,
        stateRestoreListener, streamsConfig);
    processorContext = new InternalMockProcessorContext(stateDirectory.globalStateDir(), streamsConfig);
    stateManager.setGlobalProcessorContext(processorContext);
    checkpointFile = new File(stateManager.baseDir(), StateManagerUtil.CHECKPOINT_FILE_NAME);
}
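The StreamsConfig above is built with double-brace initialization, which creates an anonymous Properties subclass that captures a reference to the enclosing test instance. A sketch of the equivalent, more conventional form, using only the keys and values already shown:

    // Equivalent configuration without the anonymous inner class.
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
    props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfig = new StreamsConfig(props);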
Use of org.apache.kafka.test.InternalMockProcessorContext in project kafka by apache.
From the class CachingPersistentSessionStoreTest, method before.
@Before
public void before() {
    final RocksDBSegmentedBytesStore segmented = new RocksDBSegmentedBytesStore(
        "store-name", "metric-scope", Long.MAX_VALUE, SEGMENT_INTERVAL, new SessionKeySchema());
    underlyingStore = new RocksDBSessionStore(segmented);
    cachingStore = new CachingSessionStore(underlyingStore, SEGMENT_INTERVAL);
    cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES,
        new MockStreamsMetrics(new Metrics()));
    final InternalMockProcessorContext context =
        new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders()));
    cachingStore.init((StateStoreContext) context, cachingStore);
}
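Because this setup opens a RocksDB-backed store on disk, a matching teardown belongs with it. A hypothetical JUnit 4 @After method (not shown in the excerpt) mirroring the @Before above:

    @After
    public void after() {
        // Release the resources acquired in before(); closing the caching
        // layer should also close the wrapped RocksDB session store.
        cachingStore.close();
    }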
Use of org.apache.kafka.test.InternalMockProcessorContext in project kafka by apache.
From the class AbstractRocksDBSegmentedBytesStoreTest, method shouldLogAndMeasureExpiredRecords.
@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final AbstractRocksDBSegmentedBytesStore<S> bytesStore = getBytesStore();
    final InternalMockProcessorContext context =
        new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig));
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    bytesStore.init((StateStoreContext) context, bytesStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Write a record to advance stream time, with a high enough timestamp
        // that the subsequent record in windows[0] will already be expired.
        bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0));
        final Bytes key = serializeKey(new Windowed<>("a", windows[0]));
        final byte[] value = serializeValue(5);
        bytesStore.put(key, value);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal = metrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    final Metric dropRate = metrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    bytesStore.close();
}
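The two metric lookups differ only in the metric name; everything else (group, thread-id tag, task-id tag) is shared. A hypothetical helper, not in the original test, that factors out the duplication using only the calls already shown:

    // Hypothetical helper: both dropped-records metrics live in the
    // "stream-task-metrics" group with the same thread-id/task-id tags.
    private static Metric droppedRecordsMetric(final Map<MetricName, ? extends Metric> metrics,
                                               final String name,
                                               final String threadId) {
        return metrics.get(new MetricName(name, "stream-task-metrics", "",
            mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    }

With it, the two lookups collapse to droppedRecordsMetric(metrics, "dropped-records-total", threadId) and its "dropped-records-rate" counterpart.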
Use of org.apache.kafka.test.InternalMockProcessorContext in project kafka by apache.
From the class ChangeLoggingKeyValueBytesStoreTest, method shouldDelegateInit.
@Test
public void shouldDelegateInit() {
    final InternalMockProcessorContext context = mockContext();
    final KeyValueStore<Bytes, byte[]> innerMock = EasyMock.mock(InMemoryKeyValueStore.class);
    final StateStore outer = new ChangeLoggingKeyValueBytesStore(innerMock);
    innerMock.init((StateStoreContext) context, outer);
    EasyMock.expectLastCall();
    EasyMock.replay(innerMock);
    outer.init((StateStoreContext) context, outer);
    EasyMock.verify(innerMock);
}
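For comparison, a sketch of the same delegation check written with Mockito instead of EasyMock, assuming Mockito is available on the test classpath; this is not the original code, and mockContext() is the same helper used above.

    // Mockito variant of the EasyMock test above (hypothetical).
    final InternalMockProcessorContext context = mockContext();
    final KeyValueStore<Bytes, byte[]> innerMock = Mockito.mock(InMemoryKeyValueStore.class);
    final StateStore outer = new ChangeLoggingKeyValueBytesStore(innerMock);
    outer.init((StateStoreContext) context, outer);
    // Verify init was delegated to the wrapped store with the outer store as root.
    Mockito.verify(innerMock).init((StateStoreContext) context, outer);

Mockito's verify-after-use style removes the replay step, which is why the expectation and replay lines have no counterpart here.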