Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class StreamThreadTest, method testMaybeClean.
@Test
public void testMaybeClean() throws Exception {
    File baseDir = Files.createTempDirectory("test").toFile();
    try {
        final long cleanupDelay = 1000L;
        Properties props = configProps();
        props.setProperty(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG, Long.toString(cleanupDelay));
        props.setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath());
        StreamsConfig config = new StreamsConfig(props);
        File applicationDir = new File(baseDir, applicationId);
        applicationDir.mkdir();
        File stateDir1 = new File(applicationDir, task1.toString());
        File stateDir2 = new File(applicationDir, task2.toString());
        File stateDir3 = new File(applicationDir, task3.toString());
        File extraDir = new File(applicationDir, "X");
        stateDir1.mkdir();
        stateDir2.mkdir();
        stateDir3.mkdir();
        extraDir.mkdir();
        final MockTime mockTime = new MockTime();
        TopologyBuilder builder = new TopologyBuilder().setApplicationId("X");
        builder.addSource("source1", "topic1");
        MockClientSupplier mockClientSupplier = new MockClientSupplier();
        StreamThread thread = new StreamThread(builder, config, mockClientSupplier, applicationId, clientId, processId, new Metrics(), mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

            @Override
            public void maybeClean(long now) {
                super.maybeClean(now);
            }

            @Override
            protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
                ProcessorTopology topology = builder.build(id.topicGroupId);
                return new TestStreamTask(id, applicationId, partitionsForTask, topology, consumer, producer, restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
            }
        };
        initPartitionGrouper(config, thread, mockClientSupplier);
        ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener;
        assertTrue(thread.tasks().isEmpty());
        mockTime.sleep(cleanupDelay);
        // all directories exist since an assignment didn't happen
        assertTrue(stateDir1.exists());
        assertTrue(stateDir2.exists());
        assertTrue(stateDir3.exists());
        assertTrue(extraDir.exists());
        List<TopicPartition> revokedPartitions;
        List<TopicPartition> assignedPartitions;
        Map<TaskId, StreamTask> prevTasks;
        //
        // Assign t1p1 and t1p2. This should create task1 & task2
        //
        final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
        activeTasks.put(task1, Collections.singleton(t1p1));
        activeTasks.put(task2, Collections.singleton(t1p2));
        thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
        revokedPartitions = Collections.emptyList();
        assignedPartitions = Arrays.asList(t1p1, t1p2);
        prevTasks = new HashMap<>(thread.tasks());
        rebalanceListener.onPartitionsRevoked(revokedPartitions);
        rebalanceListener.onPartitionsAssigned(assignedPartitions);
        // there shouldn't be any previous tasks
        assertTrue(prevTasks.isEmpty());
        // task 1 & 2 are created
        assertEquals(2, thread.tasks().size());
        // all directories should still exist before the cleanup delay elapses
        mockTime.sleep(cleanupDelay - 10L);
        thread.maybeClean(mockTime.milliseconds());
        assertTrue(stateDir1.exists());
        assertTrue(stateDir2.exists());
        assertTrue(stateDir3.exists());
        assertTrue(extraDir.exists());
        // only the state directory of the unassigned task3 will be removed; the extra directory should still exist
        mockTime.sleep(11L);
        thread.maybeClean(mockTime.milliseconds());
        assertTrue(stateDir1.exists());
        assertTrue(stateDir2.exists());
        assertFalse(stateDir3.exists());
        assertTrue(extraDir.exists());
        //
        // Revoke t1p1 and t1p2. This should remove task1 & task2
        //
        activeTasks.clear();
        revokedPartitions = assignedPartitions;
        assignedPartitions = Collections.emptyList();
        prevTasks = new HashMap<>(thread.tasks());
        rebalanceListener.onPartitionsRevoked(revokedPartitions);
        rebalanceListener.onPartitionsAssigned(assignedPartitions);
        // previous tasks should be committed
        assertEquals(2, prevTasks.size());
        for (StreamTask task : prevTasks.values()) {
            assertTrue(((TestStreamTask) task).committed);
            ((TestStreamTask) task).committed = false;
        }
        // no tasks remain
        assertTrue(thread.tasks().isEmpty());
        // the state directories for task1 & task2 still exist before the cleanup delay elapses
        mockTime.sleep(cleanupDelay - 10L);
        thread.maybeClean(mockTime.milliseconds());
        assertTrue(stateDir1.exists());
        assertTrue(stateDir2.exists());
        assertFalse(stateDir3.exists());
        assertTrue(extraDir.exists());
        // the state directories for task1 & task2 are now removed
        mockTime.sleep(11L);
        thread.maybeClean(mockTime.milliseconds());
        assertFalse(stateDir1.exists());
        assertFalse(stateDir2.exists());
        assertFalse(stateDir3.exists());
        assertTrue(extraDir.exists());
    } finally {
        Utils.delete(baseDir);
    }
}
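The assertions above work only because MockTime is a manually advanced clock: sleep() bumps an internal counter instead of blocking, which lets the test sit exactly on either side of the 1000 ms cleanup delay. A minimal standalone sketch of that behavior (class and method names are illustrative):

import static org.junit.Assert.assertEquals;

import org.apache.kafka.common.utils.MockTime;
import org.junit.Test;

public class MockTimeSketch {

    @Test
    public void sleepAdvancesTheClockWithoutBlocking() {
        MockTime mockTime = new MockTime();
        long start = mockTime.milliseconds();
        // sleep() returns immediately; it only advances the mock clock
        mockTime.sleep(990L);
        assertEquals(start + 990L, mockTime.milliseconds());
        // stepping past the remaining 10 ms is how the test above crosses
        // the cleanup delay threshold deterministically
        mockTime.sleep(11L);
        assertEquals(start + 1001L, mockTime.milliseconds());
    }
}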
Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class StreamThreadTest, method shouldCloseSuspendedTasksThatAreNoLongerAssignedToThisStreamThreadBeforeCreatingNewTasks.
@Test
public void shouldCloseSuspendedTasksThatAreNoLongerAssignedToThisStreamThreadBeforeCreatingNewTasks() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey().count("count-one");
    builder.stream("t2").groupByKey().count("count-two");
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    final StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog", 0, null, new Node[0], new Node[0])));
    restoreConsumer.updatePartitions("stream-thread-test-count-two-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-two-changelog", 0, null, new Node[0], new Node[0])));
    final HashMap<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("stream-thread-test-count-one-changelog", 0), 0L);
    offsets.put(new TopicPartition("stream-thread-test-count-two-changelog", 0), 0L);
    restoreConsumer.updateEndOffsets(offsets);
    restoreConsumer.updateBeginningOffsets(offsets);
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    final TopicPartition t1 = new TopicPartition("t1", 0);
    standbyTasks.put(new TaskId(0, 0), Utils.mkSet(t1));
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final TopicPartition t2 = new TopicPartition("t2", 0);
    activeTasks.put(new TaskId(1, 0), Utils.mkSet(t2));
    thread.partitionAssignor(new StreamPartitionAssignor() {

        @Override
        Map<TaskId, Set<TopicPartition>> standbyTasks() {
            return standbyTasks;
        }

        @Override
        Map<TaskId, Set<TopicPartition>> activeTasks() {
            return activeTasks;
        }
    });
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t2));
    // swap the assignment around and make sure we don't get any exceptions
    standbyTasks.clear();
    activeTasks.clear();
    standbyTasks.put(new TaskId(1, 0), Utils.mkSet(t2));
    activeTasks.put(new TaskId(0, 0), Utils.mkSet(t1));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t1));
}
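The restore-consumer setup above is the standard MockConsumer recipe for simulating a changelog topic: register the partition metadata, then pin beginning and end offsets to the same value so state restoration completes immediately. A minimal sketch of just that recipe (the topic name and helper are illustrative):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class ChangelogMockSketch {

    static MockConsumer<byte[], byte[]> emptyChangelogConsumer(String changelog) {
        MockConsumer<byte[], byte[]> restoreConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        // make the changelog partition visible in the consumer's metadata
        restoreConsumer.updatePartitions(changelog, Collections.singletonList(new PartitionInfo(changelog, 0, null, new Node[0], new Node[0])));
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(new TopicPartition(changelog, 0), 0L);
        // beginning == end means there is nothing to restore, so tasks start immediately
        restoreConsumer.updateBeginningOffsets(offsets);
        restoreConsumer.updateEndOffsets(offsets);
        return restoreConsumer;
    }
}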
Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class StoreChangelogReaderTest, method shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList.
@SuppressWarnings("unchecked")
@Test
public void shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList() throws Exception {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {

        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            return Collections.singletonList(partitionInfo);
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 10);
    changelogReader.validatePartitionExists(topicPartition, "store");
}
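As the test name indicates, validatePartitionExists is expected to fall back to consumer.partitionsFor() when the partition is missing from the consumer's full partition list, so the anonymous MockConsumer subclass only needs to stub that one method. Overriding a single MockConsumer method inline is a general stubbing trick; a self-contained variant of it (here partitionInfo is built locally rather than taken from the test fixture):

import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionsForStubSketch {

    static Consumer<byte[], byte[]> singlePartitionConsumer(String topic) {
        final PartitionInfo partitionInfo = new PartitionInfo(topic, 0, null, new Node[0], new Node[0]);
        return new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

            @Override
            public List<PartitionInfo> partitionsFor(final String t) {
                // pretend the broker reports exactly one partition for any topic
                return Collections.singletonList(partitionInfo);
            }
        };
    }
}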
Use of org.apache.kafka.common.utils.MockTime in project kafka by apache.
From class StoreChangelogReaderTest, method shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor.
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor() throws Exception {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {

        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            throw new TimeoutException("KABOOM!");
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 5);
    try {
        changelogReader.validatePartitionExists(topicPartition, "store");
        fail("Should have thrown streams exception");
    } catch (final StreamsException e) {
        // pass
    }
}
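The try/fail/catch idiom above predates JUnit's assertThrows; on JUnit 4.13+ the same check collapses to one line (a sketch, assuming the same fixture fields):

import static org.junit.Assert.assertThrows;

assertThrows(StreamsException.class, () -> changelogReader.validatePartitionExists(topicPartition, "store"));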
Use of org.apache.kafka.common.utils.MockTime in project cruise-control by linkedin.
From class SessionManagerTest, method prepareRequests.
private TestContext prepareRequests(boolean expectSessionInvalidation, int numRequests) {
    Time time = new MockTime();
    List<HttpServletRequest> requests = new ArrayList<>(numRequests);
    List<HttpSession> sessions = new ArrayList<>();
    for (int i = 0; i < numRequests; i++) {
        HttpServletRequest request = EasyMock.mock(HttpServletRequest.class);
        HttpSession session = EasyMock.mock(HttpSession.class);
        requests.add(request);
        sessions.add(session);
        EasyMock.expect(request.getSession()).andReturn(session).anyTimes();
        EasyMock.expect(request.getSession(false)).andReturn(session).anyTimes();
        EasyMock.expect(request.getMethod()).andReturn("GET").anyTimes();
        EasyMock.expect(request.getRequestURI()).andReturn("/test").anyTimes();
        EasyMock.expect(request.getParameterMap()).andReturn(Collections.emptyMap()).anyTimes();
        EasyMock.expect(session.getLastAccessedTime()).andReturn(time.milliseconds()).anyTimes();
        if (expectSessionInvalidation) {
            session.invalidate();
            EasyMock.expectLastCall().once();
        }
    }
    EasyMock.replay(requests.toArray());
    EasyMock.replay(sessions.toArray());
    return new TestContext(requests, time);
}
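The helper follows EasyMock's record-replay-verify lifecycle: expectations such as expectLastCall().once() are recorded first, replay() arms the mocks, and the calling test is expected to verify() them afterwards so that a missing invalidate() call fails the test. A self-contained sketch of that lifecycle (class and method names are illustrative):

import javax.servlet.http.HttpSession;

import org.easymock.EasyMock;
import org.junit.Test;

public class EasyMockLifecycleSketch {

    @Test
    public void invalidateMustBeCalledExactlyOnce() {
        HttpSession session = EasyMock.mock(HttpSession.class);
        // record phase: a void call, then expectLastCall() to set its cardinality
        session.invalidate();
        EasyMock.expectLastCall().once();
        // replay phase: the mock now enforces the recorded expectations
        EasyMock.replay(session);
        // ...the code under test would invalidate the expired session here...
        session.invalidate();
        // verify phase: fails unless invalidate() was called exactly once
        EasyMock.verify(session);
    }
}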