Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project: class IntegrationTestUtils, method startApplicationAndWaitUntilRunning.
/**
 * Starts the given {@link KafkaStreams} instances and waits for all of them to reach the
 * {@link State#RUNNING} state at the same time. Note that states may change between the time
 * that this method returns and the calling function executes its next statement.<p>
 *
 * If the application is already started, use {@link #waitForApplicationState(List, State, Duration)}
 * to wait for instances to reach {@link State#RUNNING} state.
 *
 * @param streamsList the list of streams instances to run.
 * @param timeout the time to wait for the streams to all be in {@link State#RUNNING} state.
 */
public static void startApplicationAndWaitUntilRunning(final List<KafkaStreams> streamsList, final Duration timeout) throws Exception {
// Lock/condition pair guards stateMap and lets the listeners wake the waiting thread below.
final Lock stateLock = new ReentrantLock();
final Condition stateUpdate = stateLock.newCondition();
// Most recently observed state of every instance, updated by the listeners under stateLock.
final Map<KafkaStreams, State> stateMap = new HashMap<>();
for (final KafkaStreams streams : streamsList) {
stateMap.put(streams, streams.state());
// Preserve any listener the caller already installed so it keeps firing.
final StateListener prevStateListener = getStateListener(streams);
final StateListener newStateListener = (newState, oldState) -> {
stateLock.lock();
try {
stateMap.put(streams, newState);
if (newState == State.RUNNING) {
// Signal the waiter only when every tracked instance is RUNNING simultaneously.
if (stateMap.values().stream().allMatch(state -> state == State.RUNNING)) {
stateUpdate.signalAll();
}
}
} finally {
stateLock.unlock();
}
};
// Chain behind any pre-existing listener instead of replacing it.
streams.setStateListener(prevStateListener != null ? new CompositeStateListener(prevStateListener, newStateListener) : newStateListener);
}
// Start all instances only after every listener is registered, so no transition is missed.
for (final KafkaStreams streams : streamsList) {
streams.start();
}
// Absolute wall-clock deadline for the wait loop below.
final long expectedEnd = System.currentTimeMillis() + timeout.toMillis();
stateLock.lock();
try {
// Re-check the full state map after every wake-up (including spurious ones)
// until all instances are RUNNING, or fail once the deadline has passed.
while (true) {
final Map<KafkaStreams, State> nonRunningStreams = new HashMap<>();
for (final Entry<KafkaStreams, State> entry : stateMap.entrySet()) {
if (entry.getValue() != State.RUNNING) {
nonRunningStreams.put(entry.getKey(), entry.getValue());
}
}
if (nonRunningStreams.isEmpty()) {
return;
}
final long millisRemaining = expectedEnd - System.currentTimeMillis();
if (millisRemaining <= 0) {
// Timeout expired with at least one instance not yet RUNNING.
fail("Application did not reach a RUNNING state for all streams instances. " + "Non-running instances: " + nonRunningStreams);
}
stateUpdate.await(millisRemaining, TimeUnit.MILLISECONDS);
}
} finally {
stateLock.unlock();
}
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project: class StreamsMetricsImplTest, method shouldNotMeasureLatencyBecauseSensorHasNoMetrics.
@Test
public void shouldNotMeasureLatencyBecauseSensorHasNoMetrics() {
    // Latency must not be measured when the sensor should record but has no metrics attached.
    final Time mockTime = mock(Time.class);
    final Sensor sensorWithoutMetrics = createMock(Sensor.class);
    expect(sensorWithoutMetrics.shouldRecord()).andReturn(true);
    expect(sensorWithoutMetrics.hasMetrics()).andReturn(false);
    replay(sensorWithoutMetrics);
    StreamsMetricsImpl.maybeMeasureLatency(() -> {
    }, mockTime, sensorWithoutMetrics);
    // Verifies both expectations (shouldRecord/hasMetrics) were consulted and nothing else was called.
    verify(sensorWithoutMetrics);
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project: class StoreChangelogReader, method restore.
// 1. if there are any registered changelogs that need initialization, try to initialize them first;
// 2. if all changelogs have finished, return early;
// 3. if there are any restoring changelogs, try to read from the restore consumer and process them.
@Override
public void restore(final Map<TaskId, Task> tasks) {
initializeChangelogs(tasks, registeredChangelogs());
// Sanity check: STANDBY_UPDATING implies all active changelogs have completed restoration.
if (!activeRestoringChangelogs().isEmpty() && state == ChangelogReaderState.STANDBY_UPDATING) {
throw new IllegalStateException("Should not be in standby updating state if there are still un-completed active changelogs");
}
if (allChangelogsCompleted()) {
log.debug("Finished restoring all changelogs {}", changelogs.keySet());
return;
}
final Set<TopicPartition> restoringChangelogs = restoringChangelogs();
if (!restoringChangelogs.isEmpty()) {
final ConsumerRecords<byte[], byte[]> polledRecords;
try {
// for restoring active and updating standby we may prefer different poll time
// in order to make sure we call the main consumer#poll in time.
// TODO: once we move ChangelogReader to a separate thread this may no longer be a concern
polledRecords = restoreConsumer.poll(state == ChangelogReaderState.STANDBY_UPDATING ? Duration.ZERO : pollTime);
// TODO (?) If we cannot fetch records during restore, should we trigger `task.timeout.ms` ?
// TODO (?) If we cannot fetch records for standby task, should we trigger `task.timeout.ms` ?
} catch (final InvalidOffsetException e) {
// The changelog position fell outside the topic's retained offset range (truncation/compaction);
// mark the affected tasks corrupted so they are re-initialized rather than failing the thread.
log.warn("Encountered " + e.getClass().getName() + " fetching records from restore consumer for partitions " + e.partitions() + ", it is likely that " + "the consumer's position has fallen out of the topic partition offset range because the topic was " + "truncated or compacted on the broker, marking the corresponding tasks as corrupted and re-initializing" + " it later.", e);
final Set<TaskId> corruptedTasks = new HashSet<>();
e.partitions().forEach(partition -> corruptedTasks.add(changelogs.get(partition).stateManager.taskId()));
throw new TaskCorruptedException(corruptedTasks, e);
} catch (final KafkaException e) {
// Any other client error is fatal for restoration; surface it as a StreamsException.
throw new StreamsException("Restore consumer get unexpected error polling records.", e);
}
// Buffer polled records per partition before applying them in the restore pass below.
for (final TopicPartition partition : polledRecords.partitions()) {
bufferChangelogRecords(restoringChangelogByPartition(partition), polledRecords.records(partition));
}
for (final TopicPartition partition : restoringChangelogs) {
// even if some partition do not have any accumulated data, we still trigger
// restoring since some changelog may not need to restore any at all, and the
// restore to end check needs to be executed still.
// TODO: we always try to restore as a batch when some records are accumulated, which may result in
// small batches; this can be optimized in the future, e.g. wait longer for larger batches.
final TaskId taskId = changelogs.get(partition).stateManager.taskId();
try {
// A successful restore step clears any pending task timeout for this task.
if (restoreChangelog(changelogs.get(partition))) {
tasks.get(taskId).clearTaskTimeout();
}
} catch (final TimeoutException timeoutException) {
// Defer to the task's timeout bookkeeping; it throws only once `task.timeout.ms` is exceeded.
tasks.get(taskId).maybeInitTaskTimeoutOrThrow(time.milliseconds(), timeoutException);
}
}
maybeUpdateLimitOffsetsForStandbyChangelogs(tasks);
maybeLogRestorationProgress();
}
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project: class AbstractWindowBytesStoreTest, method shouldLogAndMeasureExpiredRecords.
@Test
public void shouldLogAndMeasureExpiredRecords() {
    // A record whose timestamp falls outside the retention window must be skipped,
    // logged, and counted in the task-level dropped-records metrics.
    final Properties props = StreamsTestUtils.getStreamsConfig();
    final WindowStore<Integer, String> store = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    final InternalMockProcessorContext context = new InternalMockProcessorContext(TestUtils.tempDirectory(), new StreamsConfig(props), recordCollector);
    final Time wallClock = new SystemTime();
    context.setSystemTimeMs(wallClock.milliseconds());
    context.setTime(1L);
    store.init((StateStoreContext) context, store);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) {
        // Advance stream time by inserting record with large enough timestamp that records with timestamp 0 are expired
        store.put(1, "initial record", 2 * RETENTION_PERIOD);
        // Try inserting a record with timestamp 0 -- should be dropped
        store.put(1, "late record", 0L);
        store.put(1, "another on-time record", RETENTION_PERIOD + 1);
        assertThat(appender.getMessages(), hasItem("Skipping record for expired segment."));
    }
    final Map<MetricName, ? extends Metric> registeredMetrics = context.metrics().metrics();
    final String threadId = Thread.currentThread().getName();
    // Both dropped-records metrics share the same task-level tags.
    final Map<String, String> taskTags = mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0"));
    final Metric dropTotal = registeredMetrics.get(new MetricName("dropped-records-total", "stream-task-metrics", "", taskTags));
    final Metric dropRate = registeredMetrics.get(new MetricName("dropped-records-rate", "stream-task-metrics", "", taskTags));
    // Exactly one record (the late one) was dropped, so the rate must be non-zero.
    assertEquals(1.0, dropTotal.metricValue());
    assertNotEquals(0.0, dropRate.metricValue());
    store.close();
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project: class MeteredKeyValueStoreTest, method before.
@Before
public void before() {
    // Wrap the mocked inner store in a metered store and stub out every
    // context method the metered store consults during initialization.
    final Time time = new MockTime();
    metered = new MeteredKeyValueStore<>(inner, STORE_TYPE, time, Serdes.String(), Serdes.String());
    metrics.config().recordLevel(Sensor.RecordingLevel.DEBUG);
    // Stub expectations are order-independent; grouped by mock for readability.
    expect(inner.name()).andStubReturn(STORE_NAME);
    expect(context.applicationId()).andStubReturn(APPLICATION_ID);
    expect(context.taskId()).andStubReturn(taskId);
    expect(context.changelogFor(STORE_NAME)).andStubReturn(CHANGELOG_TOPIC);
    expect(context.metrics()).andStubReturn(new StreamsMetricsImpl(metrics, "test", StreamsConfig.METRICS_LATEST, time));
    // Expected metric tags for assertions in the individual test cases.
    tags = mkMap(mkEntry(THREAD_ID_TAG_KEY, threadId), mkEntry("task-id", taskId.toString()), mkEntry(STORE_TYPE + "-state-id", STORE_NAME));
}
Aggregations