Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache.
From the class TopologyBuilderTest, method shouldCorrectlyMapStateStoreToInternalTopics.
@Test
public void shouldCorrectlyMapStateStoreToInternalTopics() throws Exception {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.setApplicationId("appId");
    builder.addInternalTopic("internal-topic");
    builder.addSource("source", "internal-topic");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addStateStore(new MockStateStoreSupplier("store", false), "processor");
    final Map<String, List<String>> stateStoreNameToSourceTopic = builder.stateStoreNameToSourceTopics();
    // the internal source topic is prefixed with the application id when mapped to the store
    assertEquals(1, stateStoreNameToSourceTopic.size());
    assertEquals(Collections.singletonList("appId-internal-topic"), stateStoreNameToSourceTopic.get("store"));
}
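The examples in this listing only exercise MockStateStoreSupplier through its constructors and the store it hands out, so a rough, self-contained sketch of such a supplier is enough to follow them. The names below (SimpleStateStore, SimpleMockStateStore, SimpleMockStateStoreSupplier) are stand-ins invented for illustration; the real class in Kafka's test utilities implements Kafka's own state-store interfaces and may differ in detail.

// A hedged sketch of a mock state-store supplier, inferred from the usages in this
// listing: one constructor takes a store name and a persistence flag, another wraps
// an existing store instance, and the supplied store tracks whether it is open.
interface SimpleStateStore {
    String name();
    boolean persistent();
    boolean isOpen();
    void close();
}

class SimpleMockStateStore implements SimpleStateStore {
    private final String name;
    private final boolean persistent;
    private boolean open = true;

    SimpleMockStateStore(final String name, final boolean persistent) {
        this.name = name;
        this.persistent = persistent;
    }

    @Override public String name() { return name; }
    @Override public boolean persistent() { return persistent; }
    @Override public boolean isOpen() { return open; }
    @Override public void close() { open = false; }
}

class SimpleMockStateStoreSupplier {
    private final SimpleMockStateStore store;

    // mirrors new MockStateStoreSupplier("store", false) in the tests above
    SimpleMockStateStoreSupplier(final String name, final boolean persistent) {
        this(new SimpleMockStateStore(name, persistent));
    }

    // mirrors new MockStateStoreSupplier(stateStore) in the StreamThreadTest example further down
    SimpleMockStateStoreSupplier(final SimpleMockStateStore store) {
        this.store = store;
    }

    String name() { return store.name(); }
    SimpleMockStateStore get() { return store; }
}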
Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache.
From the class TopologyBuilderTest, method shouldAssociateStateStoreNameWhenStateStoreSupplierIsExternal.
@Test
public void shouldAssociateStateStoreNameWhenStateStoreSupplierIsExternal() throws Exception {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("source", "topic");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addStateStore(new MockStateStoreSupplier("store", false), "processor");
    final Map<String, List<String>> stateStoreNameToSourceTopic = builder.stateStoreNameToSourceTopics();
    // an external source topic keeps its plain name (no application-id prefix)
    assertEquals(1, stateStoreNameToSourceTopic.size());
    assertEquals(Collections.singletonList("topic"), stateStoreNameToSourceTopic.get("store"));
}
Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache.
From the class StreamPartitionAssignorTest, method testAssignWithStates.
@Test
public void testAssignWithStates() throws Exception {
    String applicationId = "test";
    builder.setApplicationId(applicationId);
    // two sub-topologies: topic1 -> processor-1 -> store1, and topic2 -> processor-2 -> store2, store3
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor-1", new MockProcessorSupplier(), "source1");
    builder.addStateStore(new MockStateStoreSupplier("store1", false), "processor-1");
    builder.addProcessor("processor-2", new MockProcessorSupplier(), "source2");
    builder.addStateStore(new MockStateStoreSupplier("store2", false), "processor-2");
    builder.addStateStore(new MockStateStoreSupplier("store3", false), "processor-2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    TaskId task00 = new TaskId(0, 0);
    TaskId task01 = new TaskId(0, 1);
    TaskId task02 = new TaskId(0, 2);
    TaskId task10 = new TaskId(1, 0);
    TaskId task11 = new TaskId(1, 1);
    TaskId task12 = new TaskId(1, 2);
    List<TaskId> tasks = Utils.mkList(task00, task01, task02, task10, task11, task12);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, applicationId, client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partition sizes: since there is no previous task and there are two sub-topologies,
    // the assignment is random, so we cannot check for an exact match
    assertEquals(2, assignments.get("consumer10").partitions().size());
    assertEquals(2, assignments.get("consumer11").partitions().size());
    assertEquals(2, assignments.get("consumer20").partitions().size());
    AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
    assertEquals(2, info10.activeTasks.size());
    assertEquals(2, info11.activeTasks.size());
    assertEquals(2, info20.activeTasks.size());
    Set<TaskId> allTasks = new HashSet<>();
    allTasks.addAll(info10.activeTasks);
    allTasks.addAll(info11.activeTasks);
    allTasks.addAll(info20.activeTasks);
    assertEquals(new HashSet<>(tasks), allTasks);
    // check tasks for state topics: each store should map to the tasks of its own topic group
    Map<Integer, TopologyBuilder.TopicsInfo> topicGroups = thread10.builder.topicGroups();
    assertEquals(Utils.mkSet(task00, task01, task02), tasksForState(applicationId, "store1", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store2", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store3", tasks, topicGroups));
}
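The last three assertions rely on a tasksForState helper that is not part of this listing. As a hedged sketch, assuming the standard Kafka Streams changelog naming of <applicationId>-<storeName>-changelog and representing each topic group only by the set of its changelog topic names, the lookup could be written as below; the real helper in StreamPartitionAssignorTest works against TopologyBuilder.TopicsInfo and may differ.

// Hypothetical stand-in for tasksForState: find the topic group whose changelog
// topics contain the store's changelog, then collect the tasks of that group.
static Set<TaskId> tasksForStateSketch(final String applicationId,
                                       final String storeName,
                                       final List<TaskId> tasks,
                                       final Map<Integer, Set<String>> changelogTopicsByGroup) {
    // assumed naming convention: "<applicationId>-<storeName>-changelog"
    final String changelogTopic = applicationId + "-" + storeName + "-changelog";
    final Set<TaskId> matching = new HashSet<>();
    for (final Map.Entry<Integer, Set<String>> group : changelogTopicsByGroup.entrySet()) {
        if (group.getValue().contains(changelogTopic)) {
            for (final TaskId task : tasks) {
                if (task.topicGroupId == group.getKey()) {
                    matching.add(task);
                }
            }
        }
    }
    return matching;
}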
Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache.
From the class StreamPartitionAssignorTest, method testAssignWithPartialTopology.
@Test
public void testAssignWithPartialTopology() throws Exception {
    Properties props = configProps();
    props.put(StreamsConfig.PARTITION_GROUPER_CLASS_CONFIG, SingleGroupPartitionGrouperStub.class);
    StreamsConfig config = new StreamsConfig(props);
    builder.addSource("source1", "topic1");
    builder.addProcessor("processor1", new MockProcessorSupplier(), "source1");
    builder.addStateStore(new MockStateStoreSupplier("store1", false), "processor1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor2", new MockProcessorSupplier(), "source2");
    builder.addStateStore(new MockStateStoreSupplier("store2", false), "processor2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    UUID uuid1 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    // will throw an exception if the assignment fails
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assignment info
    Set<TaskId> allActiveTasks = new HashSet<>();
    AssignmentInfo info10 = checkAssignment(Utils.mkSet("topic1"), assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
}
Use of org.apache.kafka.test.MockStateStoreSupplier in project kafka by apache.
From the class StreamThreadTest, method shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskFlushDuringShutdown.
@Test
public void shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskFlushDuringShutdown() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    // keep a handle on the store so its open/closed state can be asserted below
    final MockStateStoreSupplier.MockStateStore stateStore = new MockStateStoreSupplier.MockStateStore("foo", false);
    builder.stream("t1").groupByKey().count(new MockStateStoreSupplier(stateStore));
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    // a task whose flush always fails, simulating an error during shutdown
    final TestStreamTask testStreamTask = new TestStreamTask(new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)), builder.build(0), clientSupplier.consumer, clientSupplier.producer, clientSupplier.restoreConsumer, config, new MockStreamsMetrics(new Metrics()), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time)) {

        @Override
        public void flushState() {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return testStreamTask;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(testStreamTask.id, testStreamTask.partitions);
    thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions);
    // store should have been opened
    assertTrue(stateStore.isOpen());
    thread.start();
    thread.close();
    thread.join();
    assertFalse("task shouldn't have been committed as there was an exception during shutdown", testStreamTask.committed);
    // store should be closed even if we had an exception
    assertFalse(stateStore.isOpen());
}
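Two details of this last example are worth noting. The supplier is constructed around a pre-built MockStateStore instance so that the test keeps a direct handle on the store, and the anonymous subclasses of TestStreamTask and StreamThread inject the failing flushState() and the prepared task without touching production code. The final assertions then verify that, despite the flush exception, the task was not committed and the store was still closed.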