Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache: class SensorTest, method testShouldRecordForDebugLevelSensor.
@Test
public void testShouldRecordForDebugLevelSensor() {
    Sensor debugSensor = new Sensor(null, "debugSensor", null, INFO_CONFIG, new SystemTime(),
        0, Sensor.RecordingLevel.DEBUG);
    assertFalse(debugSensor.shouldRecord());

    debugSensor = new Sensor(null, "debugSensor", null, DEBUG_CONFIG, new SystemTime(),
        0, Sensor.RecordingLevel.DEBUG);
    assertTrue(debugSensor.shouldRecord());

    debugSensor = new Sensor(null, "debugSensor", null, TRACE_CONFIG, new SystemTime(),
        0, Sensor.RecordingLevel.DEBUG);
    assertTrue(debugSensor.shouldRecord());
}
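The INFO_CONFIG, DEBUG_CONFIG, and TRACE_CONFIG constants referenced above are defined elsewhere in SensorTest and are not part of this snippet. A minimal sketch of what such definitions can look like, assuming they only pin the recording level via MetricConfig.recordLevel (the class name SensorRecordingConfigs is made up for illustration):

import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Sensor;

public class SensorRecordingConfigs {
    // Assumed shape of the configs used by the test: each MetricConfig only pins
    // the recording level that Sensor.shouldRecord() compares against the
    // sensor's own level.
    static final MetricConfig INFO_CONFIG =
        new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO);
    static final MetricConfig DEBUG_CONFIG =
        new MetricConfig().recordLevel(Sensor.RecordingLevel.DEBUG);
    static final MetricConfig TRACE_CONFIG =
        new MetricConfig().recordLevel(Sensor.RecordingLevel.TRACE);
}

Under these configs a DEBUG-level sensor records at DEBUG and TRACE but not at INFO, which is exactly what the three assertions check.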
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache: class SensorTest, method testShouldRecordForTraceLevelSensor.
@Test
public void testShouldRecordForTraceLevelSensor() {
    Sensor traceSensor = new Sensor(null, "traceSensor", null, INFO_CONFIG, new SystemTime(),
        0, Sensor.RecordingLevel.TRACE);
    assertFalse(traceSensor.shouldRecord());

    traceSensor = new Sensor(null, "traceSensor", null, DEBUG_CONFIG, new SystemTime(),
        0, Sensor.RecordingLevel.TRACE);
    assertFalse(traceSensor.shouldRecord());

    traceSensor = new Sensor(null, "traceSensor", null, TRACE_CONFIG, new SystemTime(),
        0, Sensor.RecordingLevel.TRACE);
    assertTrue(traceSensor.shouldRecord());
}
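Outside of tests, sensors are usually obtained from a Metrics registry rather than constructed directly, and the registry supplies the clock. A minimal sketch under that assumption, using the Metrics(MetricConfig, Time) constructor and the sensor(name, recordingLevel) overload; the class and sensor names are made up for illustration:

import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.utils.SystemTime;

public class SensorLevelExample {
    public static void main(String[] args) {
        // The registry-wide config is at DEBUG, so a TRACE-level sensor should not record.
        MetricConfig config = new MetricConfig().recordLevel(Sensor.RecordingLevel.DEBUG);
        Metrics metrics = new Metrics(config, new SystemTime());
        Sensor traceSensor = metrics.sensor("example-sensor", Sensor.RecordingLevel.TRACE);
        System.out.println(traceSensor.shouldRecord()); // expected: false
        metrics.close();
    }
}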
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache: class ConsumerCoordinatorTest, method shouldLoseAllOwnedPartitionsBeforeRejoiningAfterDroppingOutOfTheGroup.
@Test
public void shouldLoseAllOwnedPartitionsBeforeRejoiningAfterDroppingOutOfTheGroup() {
    final List<TopicPartition> partitions = singletonList(t1p);
    try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"))) {
        final SystemTime realTime = new SystemTime();
        coordinator.ensureActiveGroup();

        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
        assertThrows(RebalanceInProgressException.class,
            () -> coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE)));

        int generationId = 42;
        String memberId = "consumer-42";
        client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
        client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.UNKNOWN_MEMBER_ID));

        boolean res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
        assertFalse(res);
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation());
        assertEquals("", coordinator.generation().memberId);

        res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
        assertFalse(res);
    }
    Collection<TopicPartition> lost = getLost(partitions);
    assertEquals(lost.isEmpty() ? 0 : 1, rebalanceListener.lostCount);
    assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost);
}
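The realTime.timer(1000) calls above rely on Time.timer(long), which returns an org.apache.kafka.common.utils.Timer bound to the clock that created it. A minimal standalone sketch of that Timer API; the polling loop and class name are invented purely for illustration:

import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;

public class TimerExample {
    public static void main(String[] args) {
        Time time = new SystemTime();
        Timer timer = time.timer(1000); // deadline 1000 ms from now on this clock

        while (!timer.isExpired()) {
            // ... attempt some work, e.g. one join/poll round ...
            time.sleep(100);  // SystemTime.sleep really blocks the thread
            timer.update();   // re-read the clock so isExpired()/remainingMs() stay current
        }
        System.out.println("remaining ms: " + timer.remainingMs()); // 0 once expired
    }
}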
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache: class AbstractWindowBytesStoreTest, method shouldLogAndMeasureExpiredRecords.
@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final WindowStore<Integer, String> windowStore =
        buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    final InternalMockProcessorContext context =
        new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    context.setTime(1L);
    windowStore.init((StateStoreContext) context, windowStore);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired
        windowStore.put(1, "initial record", 2 * RETENTION_PERIOD);
        // Try inserting a record with timestamp 0 -- should be dropped
        windowStore.put(1, "late record", 0L);
        windowStore.put(1, "another on-time record", RETENTION_PERIOD + 1);

        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }

    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal;
    final Metric dropRate;
    dropTotal = metrics.get(new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    dropRate = metrics.get(new MetricName(
        "dropped-records-rate",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());

    windowStore.close();
}
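Here SystemTime is only used to seed the mock processor context with a wall-clock timestamp. When a test needs a controllable clock instead, org.apache.kafka.common.utils.MockTime implements the same Time interface but only advances when told to; a minimal sketch of that behaviour (the class name is made up):

import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;

public class MockTimeExample {
    public static void main(String[] args) {
        Time time = new MockTime();
        long before = time.milliseconds();
        time.sleep(5_000);                  // advances the mock clock without real waiting
        long after = time.milliseconds();
        System.out.println(after - before); // 5000
    }
}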
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache: class TopologyTestDriverTest, method shouldReturnAllStoresNames.
@Test
public void shouldReturnAllStoresNames() {
    final Topology topology = setupSourceSinkTopology();
    topology.addStateStore(new KeyValueStoreBuilder<>(
        Stores.inMemoryKeyValueStore("store"),
        Serdes.ByteArray(),
        Serdes.ByteArray(),
        new SystemTime()));
    topology.addGlobalStore(
        new KeyValueStoreBuilder<>(
            Stores.inMemoryKeyValueStore("globalStore"),
            Serdes.ByteArray(),
            Serdes.ByteArray(),
            new SystemTime()).withLoggingDisabled(),
        "sourceProcessorName",
        Serdes.ByteArray().deserializer(),
        Serdes.ByteArray().deserializer(),
        "globalTopicName",
        "globalProcessorName",
        voidProcessorSupplier);
    testDriver = new TopologyTestDriver(topology, config);

    final Set<String> expectedStoreNames = new HashSet<>();
    expectedStoreNames.add("store");
    expectedStoreNames.add("globalStore");
    assertThat(testDriver.getAllStateStores().keySet(), equalTo(expectedStoreNames));
}
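KeyValueStoreBuilder is an internal Streams class whose constructor takes a Time explicitly, which is why the test passes new SystemTime(). Application code would normally go through the public Stores factory, which does not expose the clock; a minimal sketch of the equivalent builder for the "store" state store (the class name is made up for illustration):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class StoresFactoryExample {
    public static void main(String[] args) {
        // Public-API equivalent of the "store" state store added in the test above;
        // no Time argument is needed because the factory supplies the clock internally.
        StoreBuilder<KeyValueStore<byte[], byte[]>> storeBuilder =
            Stores.keyValueStoreBuilder(
                Stores.inMemoryKeyValueStore("store"),
                Serdes.ByteArray(),
                Serdes.ByteArray());

        Topology topology = new Topology();
        topology.addStateStore(storeBuilder);
    }
}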