
Example 56 with Time

use of org.apache.kafka.common.utils.Time in project kafka by apache.

the class IntegrationTestUtils method startApplicationAndWaitUntilRunning.

/**
 * Starts the given {@link KafkaStreams} instances and waits for all of them to reach the
 * {@link State#RUNNING} state at the same time. Note that states may change between the time
 * that this method returns and the calling function executes its next statement.<p>
 *
 * If the application is already started, use {@link #waitForApplicationState(List, State, Duration)}
 * to wait for instances to reach {@link State#RUNNING} state.
 *
 * @param streamsList the list of streams instances to run.
 * @param timeout the time to wait for the streams to all be in {@link State#RUNNING} state.
 */
public static void startApplicationAndWaitUntilRunning(final List<KafkaStreams> streamsList, final Duration timeout) throws Exception {
    final Lock stateLock = new ReentrantLock();
    final Condition stateUpdate = stateLock.newCondition();
    final Map<KafkaStreams, State> stateMap = new HashMap<>();
    for (final KafkaStreams streams : streamsList) {
        stateMap.put(streams, streams.state());
        final StateListener prevStateListener = getStateListener(streams);
        final StateListener newStateListener = (newState, oldState) -> {
            stateLock.lock();
            try {
                stateMap.put(streams, newState);
                if (newState == State.RUNNING) {
                    if (stateMap.values().stream().allMatch(state -> state == State.RUNNING)) {
                        stateUpdate.signalAll();
                    }
                }
            } finally {
                stateLock.unlock();
            }
        };
        streams.setStateListener(prevStateListener != null ? new CompositeStateListener(prevStateListener, newStateListener) : newStateListener);
    }
    for (final KafkaStreams streams : streamsList) {
        streams.start();
    }
    final long expectedEnd = System.currentTimeMillis() + timeout.toMillis();
    stateLock.lock();
    try {
        // loop until all instances are RUNNING, failing once the timeout expires
        while (true) {
            final Map<KafkaStreams, State> nonRunningStreams = new HashMap<>();
            for (final Entry<KafkaStreams, State> entry : stateMap.entrySet()) {
                if (entry.getValue() != State.RUNNING) {
                    nonRunningStreams.put(entry.getKey(), entry.getValue());
                }
            }
            if (nonRunningStreams.isEmpty()) {
                return;
            }
            final long millisRemaining = expectedEnd - System.currentTimeMillis();
            if (millisRemaining <= 0) {
                fail("Application did not reach a RUNNING state for all streams instances. " + "Non-running instances: " + nonRunningStreams);
            }
            stateUpdate.await(millisRemaining, TimeUnit.MILLISECONDS);
        }
    } finally {
        stateLock.unlock();
    }
}
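
For reference, a minimal sketch of how a test might call this helper; the topology and props variables here are hypothetical placeholders, not part of the original utility.

// Hypothetical usage in an integration test; 'topology' and 'props' are placeholders.
final List<KafkaStreams> streamsList = new ArrayList<>();
streamsList.add(new KafkaStreams(topology, props));
streamsList.add(new KafkaStreams(topology, props));

// Blocks until every instance reports State.RUNNING, or fails the test after 60 seconds.
IntegrationTestUtils.startApplicationAndWaitUntilRunning(streamsList, Duration.ofSeconds(60));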

Example 57 with Time

use of org.apache.kafka.common.utils.Time in project kafka by apache.

the class StreamsMetricsImplTest method shouldNotMeasureLatencyBecauseSensorHasNoMetrics.

@Test
public void shouldNotMeasureLatencyBecauseSensorHasNoMetrics() {
    final Sensor sensor = createMock(Sensor.class);
    expect(sensor.shouldRecord()).andReturn(true);
    expect(sensor.hasMetrics()).andReturn(false);
    final Time time = mock(Time.class);
    replay(sensor);
    StreamsMetricsImpl.maybeMeasureLatency(() -> {
    }, time, sensor);
    verify(sensor);
}
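
The test passes because maybeMeasureLatency only wraps the action with a timing measurement when the sensor both should record and has metrics attached; with hasMetrics() returning false, the Time mock is never touched. A rough sketch of that guard, assuming the helper has approximately this shape (the exact body in StreamsMetricsImpl may differ):

// Sketch only; the real implementation in StreamsMetricsImpl may differ in detail.
public static void maybeMeasureLatency(final Runnable action, final Time time, final Sensor sensor) {
    if (sensor.shouldRecord() && sensor.hasMetrics()) {
        final long startNs = time.nanoseconds();
        try {
            action.run();
        } finally {
            sensor.record((time.nanoseconds() - startNs) / 1_000_000.0);
        }
    } else {
        // No metrics registered (the case in this test): run the action without measuring.
        action.run();
    }
}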

Example 58 with Time

use of org.apache.kafka.common.utils.Time in project kafka by apache.

the class StoreChangelogReader method restore.

// 1. if there are any registered changelogs that need initialization, try to initialize them first;
// 2. if all changelogs have finished restoring, return early;
// 3. if there are any restoring changelogs, try to read from the restore consumer and process them.
@Override
public void restore(final Map<TaskId, Task> tasks) {
    initializeChangelogs(tasks, registeredChangelogs());
    if (!activeRestoringChangelogs().isEmpty() && state == ChangelogReaderState.STANDBY_UPDATING) {
        throw new IllegalStateException("Should not be in standby updating state if there are still incomplete active changelogs");
    }
    if (allChangelogsCompleted()) {
        log.debug("Finished restoring all changelogs {}", changelogs.keySet());
        return;
    }
    final Set<TopicPartition> restoringChangelogs = restoringChangelogs();
    if (!restoringChangelogs.isEmpty()) {
        final ConsumerRecords<byte[], byte[]> polledRecords;
        try {
            // for restoring active tasks and updating standby tasks we may prefer different poll times,
            // in order to make sure we call the main consumer#poll in time.
            // TODO: once we move ChangelogReader to a separate thread this may no longer be a concern
            polledRecords = restoreConsumer.poll(state == ChangelogReaderState.STANDBY_UPDATING ? Duration.ZERO : pollTime);
        // TODO (?) If we cannot fetch records during restore, should we trigger `task.timeout.ms` ?
        // TODO (?) If we cannot fetch records for standby task, should we trigger `task.timeout.ms` ?
        } catch (final InvalidOffsetException e) {
            log.warn("Encountered " + e.getClass().getName() + " fetching records from restore consumer for partitions " + e.partitions() + ", it is likely that " + "the consumer's position has fallen out of the topic partition offset range because the topic was " + "truncated or compacted on the broker, marking the corresponding tasks as corrupted and re-initializing" + " it later.", e);
            final Set<TaskId> corruptedTasks = new HashSet<>();
            e.partitions().forEach(partition -> corruptedTasks.add(changelogs.get(partition).stateManager.taskId()));
            throw new TaskCorruptedException(corruptedTasks, e);
        } catch (final KafkaException e) {
            throw new StreamsException("Restore consumer got an unexpected error while polling records.", e);
        }
        for (final TopicPartition partition : polledRecords.partitions()) {
            bufferChangelogRecords(restoringChangelogByPartition(partition), polledRecords.records(partition));
        }
        for (final TopicPartition partition : restoringChangelogs) {
            // even if some partitions do not have any accumulated data, we still trigger
            // restoring, since some changelogs may not need to restore anything at all, and the
            // restore-to-end check still needs to be executed.
            // TODO: we always try to restore as a batch when some records are accumulated, which may result in
            // small batches; this can be optimized in the future, e.g. wait longer for larger batches.
            final TaskId taskId = changelogs.get(partition).stateManager.taskId();
            try {
                if (restoreChangelog(changelogs.get(partition))) {
                    tasks.get(taskId).clearTaskTimeout();
                }
            } catch (final TimeoutException timeoutException) {
                tasks.get(taskId).maybeInitTaskTimeoutOrThrow(time.milliseconds(), timeoutException);
            }
        }
        maybeUpdateLimitOffsetsForStandbyChangelogs(tasks);
        maybeLogRestorationProgress();
    }
}
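
Callers are expected to treat TaskCorruptedException as recoverable rather than fatal. A hypothetical sketch of the caller side (handleCorruptedTasks is an illustrative placeholder, not the actual StreamThread API):

// Hypothetical caller; handleCorruptedTasks() is a placeholder, not a real API.
try {
    changelogReader.restore(tasks);
} catch (final TaskCorruptedException e) {
    // Close the affected tasks dirty and re-initialize them from the changelog.
    log.warn("Detected corrupted tasks, reviving them from scratch", e);
    handleCorruptedTasks(e);
}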

Example 59 with Time

use of org.apache.kafka.common.utils.Time in project kafka by apache.

the class AbstractWindowBytesStoreTest method shouldLogAndMeasureExpiredRecords.

@Test
public void shouldLogAndMeasureExpiredRecords() {
    final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
    final WindowStore<Integer, String> windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(streamsConfig), recordCollector);
    final Time time = new SystemTime();
    context.setSystemTimeMs(time.milliseconds());
    context.setTime(1L);
    windowStore.init((StateStoreContext) context, windowStore);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting a record with a timestamp large enough that records with timestamp 0 are expired
        windowStore.put(1, "initial record", 2 * RETENTION_PERIOD);
        // Try inserting a record with timestamp 0 -- should be dropped
        windowStore.put(1, "late record", 0L);
        windowStore.put(1, "another on-time record", RETENTION_PERIOD + 1);
        final List<String> messages = appender.getMessages();
        assertThat(messages, hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    final Metric dropTotal;
    final Metric dropRate;
    dropTotal = metrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    dropRate = metrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"))));
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    windowStore.close();
}

Example 60 with Time

use of org.apache.kafka.common.utils.Time in project kafka by apache.

the class MeteredKeyValueStoreTest method before.

@Before
public void before() {
    final Time mockTime = new MockTime();
    metered = new MeteredKeyValueStore<>(inner, STORE_TYPE, mockTime, Serdes.String(), Serdes.String());
    metrics.config().recordLevel(Sensor.RecordingLevel.DEBUG);
    expect(context.applicationId()).andStubReturn(APPLICATION_ID);
    expect(context.metrics()).andStubReturn(new StreamsMetricsImpl(metrics, "test", StreamsConfig.METRICS_LATEST, mockTime));
    expect(context.taskId()).andStubReturn(taskId);
    expect(context.changelogFor(STORE_NAME)).andStubReturn(CHANGELOG_TOPIC);
    expect(inner.name()).andStubReturn(STORE_NAME);
    tags = mkMap(mkEntry(THREAD_ID_TAG_KEY, threadId), mkEntry("task-id", taskId.toString()), mkEntry(STORE_TYPE + "-state-id", STORE_NAME));
}
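
MockTime is useful here because, unlike SystemTime, the clock advances only when the test tells it to, so latency-style metrics become deterministic. A minimal sketch of that behavior:

// MockTime advances only on demand, with no real waiting involved.
final MockTime mockTime = new MockTime();
final long before = mockTime.milliseconds();
// Advances the mock clock by 100 ms instantly.
mockTime.sleep(100);
assertEquals(before + 100, mockTime.milliseconds());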
