Example 16 with SystemTime

Use of org.apache.kafka.common.utils.SystemTime in the apache/kafka project.

From class SensorTest, method testShouldRecordForInfoLevelSensor.

@Test
public void testShouldRecordForInfoLevelSensor() {
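    // An INFO-level sensor should record no matter whether the metric config
    // is at INFO, DEBUG, or TRACE recording level.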
    Sensor infoSensor = new Sensor(null, "infoSensor", null, INFO_CONFIG, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(infoSensor.shouldRecord());
    infoSensor = new Sensor(null, "infoSensor", null, DEBUG_CONFIG, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(infoSensor.shouldRecord());
    infoSensor = new Sensor(null, "infoSensor", null, TRACE_CONFIG, new SystemTime(), 0, Sensor.RecordingLevel.INFO);
    assertTrue(infoSensor.shouldRecord());
}
Also used: SystemTime(org.apache.kafka.common.utils.SystemTime), Test(org.junit.jupiter.api.Test)
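
For reference, SystemTime is the production Time implementation: it delegates milliseconds(), nanoseconds(), and sleep() to the wall clock. A minimal sketch of that behavior (the class name is illustrative, not from the test):

import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;

public class SystemTimeDemo {
    public static void main(String[] args) {
        Time time = new SystemTime();        // wall-clock Time implementation
        long before = time.milliseconds();
        time.sleep(10);                      // actually blocks the calling thread
        System.out.println("elapsed ~" + (time.milliseconds() - before) + " ms");
    }
}

MockTime is the usual substitute when a test needs deterministic time rather than the wall clock.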

Example 17 with SystemTime

Use of org.apache.kafka.common.utils.SystemTime in the apache/kafka project.

From class ConsumerCoordinatorTest, method shouldLoseAllOwnedPartitionsBeforeRejoiningAfterResettingGenerationId.

@Test
public void shouldLoseAllOwnedPartitionsBeforeRejoiningAfterResettingGenerationId() {
    final List<TopicPartition> partitions = singletonList(t1p);
    try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"))) {
        final SystemTime realTime = new SystemTime();
        coordinator.ensureActiveGroup();
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS);
        assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE)));
        int generationId = 42;
        String memberId = "consumer-42";
        client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE));
        client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.ILLEGAL_GENERATION));
        boolean res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
        assertFalse(res);
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION.generationId, coordinator.generation().generationId);
        assertEquals(AbstractCoordinator.Generation.NO_GENERATION.protocolName, coordinator.generation().protocolName);
        // member ID should not be reset
        assertEquals(memberId, coordinator.generation().memberId);
        res = coordinator.joinGroupIfNeeded(realTime.timer(1000));
        assertFalse(res);
    }
    Collection<TopicPartition> lost = getLost(partitions);
    assertEquals(lost.isEmpty() ? 0 : 1, rebalanceListener.lostCount);
    assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost);
}
Also used: TopicPartition(org.apache.kafka.common.TopicPartition), OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata), SystemTime(org.apache.kafka.common.utils.SystemTime), Test(org.junit.jupiter.api.Test)
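
The test above bounds each joinGroupIfNeeded call with realTime.timer(1000). A Timer snapshots the current time and tracks a deadline; a hedged sketch of that pattern (the class name and loop body are illustrative stand-ins, not coordinator code):

import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Timer;

public class TimerDemo {
    public static void main(String[] args) {
        Time time = new SystemTime();
        Timer timer = time.timer(1000);     // deadline 1000 ms from now
        while (!timer.isExpired()) {
            time.sleep(100);                // stand-in for one unit of work
            timer.update();                 // refresh the timer's cached current time
        }
        System.out.println("expired, remaining = " + timer.remainingMs() + " ms");
    }
}

A Timer caches its notion of "now", so callers must invoke update() after each blocking step. The test builds its Timer from a real SystemTime, presumably so the bounded join attempts measure real elapsed time rather than the fixture's mocked clock.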

Example 18 with SystemTime

Use of org.apache.kafka.common.utils.SystemTime in the apache/kafka project.

From class KTableSuppressProcessorMetricsTest, method shouldRecordMetricsWithBuiltInMetricsVersionLatest.

@Test
public void shouldRecordMetricsWithBuiltInMetricsVersionLatest() {
    final String storeName = "test-store";
    final StateStore buffer = new InMemoryTimeOrderedKeyValueBuffer.Builder<>(storeName, Serdes.String(), Serdes.Long()).withLoggingDisabled().build();
    final KTableImpl<String, ?, Long> mock = EasyMock.mock(KTableImpl.class);
    final Processor<String, Change<Long>, String, Change<Long>> processor = new KTableSuppressProcessorSupplier<>((SuppressedInternal<String>) Suppressed.<String>untilTimeLimit(Duration.ofDays(100), maxRecords(1)), storeName, mock).get();
    streamsConfig.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);
    final MockInternalNewProcessorContext<String, Change<Long>> context = new MockInternalNewProcessorContext<>(streamsConfig, TASK_ID, TestUtils.tempDirectory());
    final Time time = new SystemTime();
    context.setCurrentNode(new ProcessorNode("testNode"));
    context.setSystemTimeMs(time.milliseconds());
    buffer.init((StateStoreContext) context, buffer);
    processor.init(context);
    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final String key = "longKey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    processor.process(new Record<>(key, value, timestamp));
    final MetricName evictionRateMetric = evictionRateMetricLatest;
    final MetricName evictionTotalMetric = evictionTotalMetricLatest;
    final MetricName bufferSizeAvgMetric = bufferSizeAvgMetricLatest;
    final MetricName bufferSizeMaxMetric = bufferSizeMaxMetricLatest;
    final MetricName bufferCountAvgMetric = bufferCountAvgMetricLatest;
    final MetricName bufferCountMaxMetric = bufferCountMaxMetricLatest;
    {
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, is(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(0.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(21.5));
        verifyMetric(metrics, bufferSizeMaxMetric, is(43.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(0.5));
        verifyMetric(metrics, bufferCountMaxMetric, is(1.0));
    }
    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(timestamp + 1);
    processor.process(new Record<>("key", value, timestamp + 1));
    {
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, greaterThan(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(1.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(41.0));
        verifyMetric(metrics, bufferSizeMaxMetric, is(82.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(1.0));
        verifyMetric(metrics, bufferCountMaxMetric, is(2.0));
    }
}
Also used: StateStore(org.apache.kafka.streams.processor.StateStore), Time(org.apache.kafka.common.utils.Time), SystemTime(org.apache.kafka.common.utils.SystemTime), Change(org.apache.kafka.streams.kstream.internals.Change), MetricName(org.apache.kafka.common.MetricName), ProcessorNode(org.apache.kafka.streams.processor.internals.ProcessorNode), MockInternalNewProcessorContext(org.apache.kafka.test.MockInternalNewProcessorContext), Metric(org.apache.kafka.common.Metric), Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap), Map(java.util.Map), Test(org.junit.Test)
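
The suppression configuration under test is the public DSL value Suppressed.untilTimeLimit(Duration.ofDays(100), maxRecords(1)): hold updates for up to 100 days, but once more than one record is buffered, evict early, which is what drives the eviction metrics above. A minimal sketch of constructing that config (class name illustrative):

import java.time.Duration;
import org.apache.kafka.streams.kstream.Suppressed;
import static org.apache.kafka.streams.kstream.Suppressed.BufferConfig.maxRecords;

public class SuppressConfigDemo {
    public static void main(String[] args) {
        // Same value the test casts to SuppressedInternal before handing it
        // to KTableSuppressProcessorSupplier.
        Suppressed<String> suppressed =
                Suppressed.untilTimeLimit(Duration.ofDays(100), maxRecords(1));
        System.out.println(suppressed);
    }
}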

Example 19 with SystemTime

Use of org.apache.kafka.common.utils.SystemTime in the apache/kafka project.

From class AbstractRocksDBSegmentedBytesStoreTest, method shouldLogAndMeasureExpiredRecords.

@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final AbstractRocksDBSegmentedBytesStore<S> bytesStore = getBytesStore();
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig));
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    bytesStore.init((StateStoreContext) context, bytesStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // write a record to advance stream time, with a high enough timestamp
        // that the subsequent record in windows[0] will already be expired.
        bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0));
        final Bytes key = serializeKey(new Windowed<>("a", windows[0]));
        final byte[] value = serializeValue(5);
        bytesStore.put(key, value);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal;
    final Metric dropRate;
    dropTotal = metrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    dropRate = metrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    bytesStore.close();
}
Also used: MockTime(org.apache.kafka.common.utils.MockTime), Time(org.apache.kafka.common.utils.Time), SystemTime(org.apache.kafka.common.utils.SystemTime), Properties(java.util.Properties), Windowed(org.apache.kafka.streams.kstream.Windowed), Bytes(org.apache.kafka.common.utils.Bytes), MetricName(org.apache.kafka.common.MetricName), LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), Metric(org.apache.kafka.common.Metric), InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext), StreamsConfig(org.apache.kafka.streams.StreamsConfig), Test(org.junit.Test)
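
The dropped-records metrics looked up above are task-level and keyed only by name, group, and tags; the empty description string does not participate in MetricName equality. A sketch of building that lookup key (class name illustrative):

import java.util.Map;
import org.apache.kafka.common.MetricName;
import static org.apache.kafka.common.utils.Utils.mkEntry;
import static org.apache.kafka.common.utils.Utils.mkMap;

public class DroppedRecordsKey {
    public static void main(String[] args) {
        String threadId = Thread.currentThread().getName();
        Map<String, String> tags =
                mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"));
        // Arguments are name, group, description, tags; description is
        // ignored by MetricName.equals(), so "" is safe for lookups.
        MetricName total = new MetricName(
                "dropped-records-total", "stream-task-metrics", "", tags);
        System.out.println(total);
    }
}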

Example 20 with SystemTime

Use of org.apache.kafka.common.utils.SystemTime in the apache/kafka project.

From class AbstractSessionBytesStoreTest, method shouldLogAndMeasureExpiredRecords.

@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final SessionStore<String, Long> sessionStore = buildSessionStore(RETENTION_PERIOD, Serdes.String(), Serdes.Long());
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setTime(1L);
    context.setSystemTimeMs(time.milliseconds());
    sessionStore.init((StateStoreContext) context, sessionStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired
        // Note that rocksdb only expires whole segments at a time (segment interval = 60,000 for this retention period)
        sessionStore.put(new Windowed<>("initial record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);
        // Try inserting a record with timestamp 0 -- should be dropped
        sessionStore.put(new Windowed<>("late record", new SessionWindow(0, 0)), 0L);
        sessionStore.put(new Windowed<>("another on-time record", new SessionWindow(0, 2 * SEGMENT_INTERVAL)), 0L);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal;
    final Metric dropRate;
    dropTotal = metrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    dropRate = metrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    sessionStore.close();
}
Also used: Time(org.apache.kafka.common.utils.Time), SystemTime(org.apache.kafka.common.utils.SystemTime), Properties(java.util.Properties), MetricName(org.apache.kafka.common.MetricName), LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), Metric(org.apache.kafka.common.Metric), SessionWindow(org.apache.kafka.streams.kstream.internals.SessionWindow), InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext), StreamsConfig(org.apache.kafka.streams.StreamsConfig), Test(org.junit.Test)
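
Both store tests seed the mock processor context with wall-clock time via new SystemTime().milliseconds(). When a test needs to control time rather than read it, MockTime (imported alongside SystemTime in Example 19) advances a virtual clock without blocking. A brief sketch of the difference (class name illustrative):

import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.SystemTime;
import org.apache.kafka.common.utils.Time;

public class TimeChoiceDemo {
    public static void main(String[] args) {
        Time wallClock = new SystemTime();
        MockTime virtual = new MockTime();
        long t0 = virtual.milliseconds();
        virtual.sleep(5_000);    // returns immediately, advances the virtual clock
        System.out.println("virtual advanced " + (virtual.milliseconds() - t0) + " ms");
        System.out.println("wall clock: " + wallClock.milliseconds());
    }
}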

Aggregations

SystemTime (org.apache.kafka.common.utils.SystemTime): 20
Test (org.junit.Test): 7
Test (org.junit.jupiter.api.Test): 7
Properties (java.util.Properties): 5
Metric (org.apache.kafka.common.Metric): 4
MetricName (org.apache.kafka.common.MetricName): 4
Time (org.apache.kafka.common.utils.Time): 4
MetricRegistry (com.codahale.metrics.MetricRegistry): 3
KafkaCruiseControlConfig (com.linkedin.kafka.cruisecontrol.config.KafkaCruiseControlConfig): 3
HashSet (java.util.HashSet): 3
TopicPartition (org.apache.kafka.common.TopicPartition): 3
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 3
LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender): 3
KeyValueStoreBuilder (org.apache.kafka.streams.state.internals.KeyValueStoreBuilder): 3
InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext): 3
OperationProgress (com.linkedin.kafka.cruisecontrol.async.progress.OperationProgress): 2
ClusterModelStats (com.linkedin.kafka.cruisecontrol.model.ClusterModelStats): 2
Map (java.util.Map): 2
TestKafkaBroker (org.apache.ignite.stream.kafka.TestKafkaBroker): 2
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 2