Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache.
From the class SensorTest, method testShouldRecordForInfoLevelSensor.
@Test
public void testShouldRecordForInfoLevelSensor() {
    // An INFO-level sensor records under every configured recording level,
    // since INFO is the least verbose level.
    Sensor infoSensor = new Sensor(null, "infoSensor", null, INFO_CONFIG, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(infoSensor.shouldRecord());

    infoSensor = new Sensor(null, "infoSensor", null, DEBUG_CONFIG, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(infoSensor.shouldRecord());

    infoSensor = new Sensor(null, "infoSensor", null, TRACE_CONFIG, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(infoSensor.shouldRecord());
}
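These constructor calls pass SystemTime where Sensor expects Kafka's Time abstraction. A minimal standalone sketch of what that abstraction provides (the class name TimeDemo and the printed label are illustrative, not from the test):

import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;

public class TimeDemo {
    public static void main(String[] args) {
        // SystemTime reads the real wall clock; Time.SYSTEM is a shared instance
        // that newer Kafka code tends to use instead of allocating new SystemTime().
        Time time = new SystemTime();
        long startMs = time.milliseconds(); // System.currentTimeMillis() underneath
        time.sleep(10);                     // really blocks on SystemTime
        System.out.println("elapsed ms: " + (time.milliseconds() - startMs));
    }
}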
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache.
From the class ConsumerCoordinatorTest, method shouldLoseAllOwnedPartitionsBeforeRejoiningAfterResettingGenerationId.
@Test
public void shouldLoseAllOwnedPartitionsBeforeRejoiningAfterResettingGenerationId() {
    final List<TopicPartition> partitions = singletonList(t1p);
    try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"))) {
        final SystemTime realTime = new SystemTime();
        coordinator.ensureActiveGroup();

        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
        assertThrows(RebalanceInProgressException.class,
            () -> coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE)));

        int generationId = 42;
        String memberId = "consumer-42";
        client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
        // The broker rejects the sync with ILLEGAL_GENERATION, which resets the
        // generation but keeps the member ID, as the assertions below verify.
        client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.ILLEGAL_GENERATION));

        boolean res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
        assertFalse(res);
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION.generationId, coordinator.generation().generationId);
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION.protocolName, coordinator.generation().protocolName);
        // member ID should not be reset
        assertEquals(memberId, coordinator.generation().memberId);

        res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
        assertFalse(res);
    }
    Collection<TopicPartition> lost = getLost(partitions);
    assertEquals(lost.isEmpty() ? 0 : 1, rebalanceListener.lostCount);
    assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost);
}
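Each joinGroupIfNeeded call above is bounded by realTime.timer(1000), which wraps the clock in a Timer with a fixed deadline. A hedged sketch of the usual consume-a-timer pattern (the polling loop is illustrative, not coordinator code):

import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;

public class TimerDemo {
    public static void main(String[] args) {
        Time time = new SystemTime();
        Timer timer = time.timer(1000); // deadline 1000 ms from construction
        while (!timer.isExpired()) {
            time.sleep(100); // a real caller would poll or retry here
            timer.update();  // re-read the clock so isExpired()/remainingMs() stay current
        }
        System.out.println("remaining ms: " + timer.remainingMs()); // 0 once expired
    }
}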
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache.
From the class KTableSuppressProcessorMetricsTest, method shouldRecordMetricsWithBuiltInMetricsVersionLatest.
@Test
public void shouldRecordMetricsWithBuiltInMetricsVersionLatest() {
    final String storeName = "test-store";
    final StateStore buffer = new InMemoryTimeOrderedKeyValueBuffer.Builder<>(storeName, Serdes.String(), Serdes.Long())
        .withLoggingDisabled()
        .build();
    final KTableImpl<String, ?, Long> mock = EasyMock.mock(KTableImpl.class);
    final Processor<String, Change<Long>, String, Change<Long>> processor =
        new KTableSuppressProcessorSupplier<>(
            (SuppressedInternal<String>) Suppressed.<String>untilTimeLimit(Duration.ofDays(100), maxRecords(1)),
            storeName,
            mock
        ).get();
    streamsConfig.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);
    final MockInternalNewProcessorContext<String, Change<Long>> context =
        new MockInternalNewProcessorContext<>(streamsConfig, TASK_ID, TestUtils.tempDirectory());
    final Time time = new SystemTime();
    context.setCurrentNode(new ProcessorNode("testNode"));
    context.setSystemTimeMs(time.milliseconds());
    buffer.init((StateStoreContext) context, buffer);
    processor.init(context);

    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final String key = "longKey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    processor.process(new Record<>(key, value, timestamp));

    final MetricName evictionRateMetric = evictionRateMetricLatest;
    final MetricName evictionTotalMetric = evictionTotalMetricLatest;
    final MetricName bufferSizeAvgMetric = bufferSizeAvgMetricLatest;
    final MetricName bufferSizeMaxMetric = bufferSizeMaxMetricLatest;
    final MetricName bufferCountAvgMetric = bufferCountAvgMetricLatest;
    final MetricName bufferCountMaxMetric = bufferCountMaxMetricLatest;

    {
        // The first record is buffered; nothing has been evicted yet.
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, is(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(0.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(21.5));
        verifyMetric(metrics, bufferSizeMaxMetric, is(43.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(0.5));
        verifyMetric(metrics, bufferCountMaxMetric, is(1.0));
    }

    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(timestamp + 1);
    // The second record exceeds the maxRecords(1) buffer limit, forcing an eviction.
    processor.process(new Record<>("key", value, timestamp + 1));

    {
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, greaterThan(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(1.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(41.0));
        verifyMetric(metrics, bufferSizeMaxMetric, is(82.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(1.0));
        verifyMetric(metrics, bufferCountMaxMetric, is(2.0));
    }
}
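The verifyMetric calls resolve each MetricName against the context's metrics registry and assert on metricValue(). A self-contained sketch of that lookup-and-read pattern using the plain Metrics registry (the sensor, group, and recorded values are made up for illustration):

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

public class MetricLookupDemo {
    public static void main(String[] args) {
        try (Metrics metrics = new Metrics()) {
            Sensor sensor = metrics.sensor("buffer-size");
            MetricName avg = metrics.metricName("buffer-size-avg", "demo-group");
            MetricName max = metrics.metricName("buffer-size-max", "demo-group");
            sensor.add(avg, new Avg());
            sensor.add(max, new Max());
            sensor.record(21.0);
            sensor.record(43.0);
            // Look up by MetricName, then read the computed value:
            System.out.println(metrics.metrics().get(avg).metricValue()); // 32.0
            System.out.println(metrics.metrics().get(max).metricValue()); // 43.0
        }
    }
}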
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache.
From the class AbstractRocksDBSegmentedBytesStoreTest, method shouldLogAndMeasureExpiredRecords.
@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final AbstractRocksDBSegmentedBytesStore<S> bytesStore = getBytesStore();
    final InternalMockProcessorContext context =
        new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig));
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    bytesStore.init((StateStoreContext) context, bytesStore);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Write a record to advance stream time, with a high enough timestamp
        // that the subsequent record in windows[0] will already be expired.
        bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0));

        final Bytes key = serializeKey(new Windowed<>("a", windows[0]));
        final byte[] value = serializeValue(5);
        bytesStore.put(key, value);

        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }

    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal = metrics.get(new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    final Metric dropRate = metrics.get(new MetricName(
        "dropped-records-rate",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    bytesStore.close();
}
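The two metrics.get(...) lookups rebuild, tag for tag, the MetricName that Streams registers for the task-level dropped-records sensor. A small sketch isolating just that name construction (nothing store-specific runs here):

import java.util.Map;
import org.apache.kafka.common.MetricName;
import static org.apache.kafka.common.utils.Utils.mkEntry;
import static org.apache.kafka.common.utils.Utils.mkMap;

public class DroppedRecordsMetricNames {
    public static void main(String[] args) {
        // The tags must match exactly what Streams attaches to the task's metrics.
        Map<String, String> tags = mkMap(
            mkEntry("thread-id", Thread.currentThread().getName()),
            mkEntry("task-id", "0_0"));
        System.out.println(new MetricName("dropped-records-total", "stream-task-metrics", "", tags));
        System.out.println(new MetricName("dropped-records-rate", "stream-task-metrics", "", tags));
    }
}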
Use of org.apache.kafka.common.utils.SystemTime in project kafka by apache.
From the class AbstractSessionBytesStoreTest, method shouldLogAndMeasureExpiredRecords.
@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final SessionStore<String, Long> sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.String(), Serdes.Long());
    final InternalMockProcessorContext context =
        new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setTime(1L);
    context.setSystemTimeMs(time.milliseconds());
    sessionStore.init((StateStoreContext) context, sessionStore);

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting a record with a large enough timestamp
        // that records with timestamp 0 are expired. Note that RocksDB only expires
        // a whole segment at a time (segment interval = 60,000 for this retention period).
        sessionStore.put(new Windowed<>("initial record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);

        // Try inserting a record with timestamp 0 -- it should be dropped.
        sessionStore.put(new Windowed<>("late record", new SessionWindow(0, 0)), 0L);
        sessionStore.put(new Windowed<>("another on-time record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);

        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }

    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal = metrics.get(new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    final Metric dropRate = metrics.get(new MetricName(
        "dropped-records-rate",
        "stream-task-metrics",
        "",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    sessionStore.close();
}
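Both store tests sample the wall clock exactly once, via context.setSystemTimeMs(time.milliseconds()). When a test needs to control time rather than sample it, Kafka's MockTime is the usual stand-in for SystemTime; a minimal sketch, assuming org.apache.kafka.common.utils.MockTime from the clients test artifact is on the classpath:

import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;

public class MockTimeDemo {
    public static void main(String[] args) {
        Time time = new MockTime(); // starts at construction time, then only moves when told to
        long before = time.milliseconds();
        time.sleep(60_000); // advances the fake clock instantly; no real waiting
        System.out.println(time.milliseconds() - before); // 60000
    }
}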