Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.
From the class TopologyBuilderTest, the method shouldAssociateStateStoreNameWhenStateStoreSupplierIsInternal.
@Test
public void shouldAssociateStateStoreNameWhenStateStoreSupplierIsInternal() throws Exception {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("source", "topic");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addStateStore(new MockStateStoreSupplier("store", false), "processor");
    final Map<String, List<String>> stateStoreNameToSourceTopic = builder.stateStoreNameToSourceTopics();
    assertEquals(1, stateStoreNameToSourceTopic.size());
    assertEquals(Collections.singletonList("topic"), stateStoreNameToSourceTopic.get("store"));
}
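The mapping asserted here follows the processor graph: the store is attached to "processor", whose parent source "source" consumes "topic", so the store resolves to that single topic. A minimal sketch of the full expected map, reusing the fixture from the test above (illustrative only, not additional production API):

    // Illustrative only: the complete map the test's two assertions imply.
    final Map<String, List<String>> expected =
            Collections.singletonMap("store", Collections.singletonList("topic"));
    assertEquals(expected, builder.stateStoreNameToSourceTopics());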
Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.
From the class TopologyBuilderTest, the method testTopicGroups.
@Test
public void testTopicGroups() {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.setApplicationId("X");
    builder.addInternalTopic("topic-1x");
    builder.addSource("source-1", "topic-1", "topic-1x");
    builder.addSource("source-2", "topic-2");
    builder.addSource("source-3", "topic-3");
    builder.addSource("source-4", "topic-4");
    builder.addSource("source-5", "topic-5");
    builder.addProcessor("processor-1", new MockProcessorSupplier(), "source-1");
    builder.addProcessor("processor-2", new MockProcessorSupplier(), "source-2", "processor-1");
    builder.copartitionSources(mkList("source-1", "source-2"));
    builder.addProcessor("processor-3", new MockProcessorSupplier(), "source-3", "source-4");
    Map<Integer, TopicsInfo> topicGroups = builder.topicGroups();
    Map<Integer, TopicsInfo> expectedTopicGroups = new HashMap<>();
    expectedTopicGroups.put(0, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-1", "X-topic-1x", "topic-2"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap()));
    expectedTopicGroups.put(1, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-3", "topic-4"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap()));
    expectedTopicGroups.put(2, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-5"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap()));
    assertEquals(3, topicGroups.size());
    assertEquals(expectedTopicGroups, topicGroups);
    Collection<Set<String>> copartitionGroups = builder.copartitionGroups();
    assertEquals(mkSet(mkSet("topic-1", "X-topic-1x", "topic-2")), new HashSet<>(copartitionGroups));
}
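Note how the topic declared via addInternalTopic as "topic-1x" shows up in the expected groups as "X-topic-1x": internal topics are namespaced with the application id set by setApplicationId. A minimal sketch of that naming rule, inferred from the expected values in the test above; decorateInternalTopic is a hypothetical helper name, the real decoration logic lives inside TopologyBuilder:

    // Assumed naming rule, inferred from the test's expected values:
    // internal topics are prefixed with "<applicationId>-".
    static String decorateInternalTopic(final String applicationId, final String topic) {
        return applicationId + "-" + topic; // "X" + "-" + "topic-1x" -> "X-topic-1x"
    }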
Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.
From the class TopologyBuilderTest, the method testAddStateStore.
@Test
public void testAddStateStore() {
    final TopologyBuilder builder = new TopologyBuilder();
    StateStoreSupplier supplier = new MockStateStoreSupplier("store-1", false);
    builder.addStateStore(supplier);
    builder.setApplicationId("X");
    builder.addSource("source-1", "topic-1");
    builder.addProcessor("processor-1", new MockProcessorSupplier(), "source-1");
    assertEquals(0, builder.build(null).stateStores().size());
    builder.connectProcessorAndStateStores("processor-1", "store-1");
    List<StateStore> suppliers = builder.build(null).stateStores();
    assertEquals(1, suppliers.size());
    assertEquals(supplier.name(), suppliers.get(0).name());
}
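The store only appears in the built topology once it has been connected to a processor: the first build(null) sees zero stores, the second sees one. The registration and the wiring can also be collapsed into a single call by passing the processor names to addStateStore, as the other examples on this page do; a sketch of that equivalent one-step form, assuming the same builder and supplier as above:

    // Equivalent one-step wiring: register the supplier and connect it to
    // "processor-1" in a single call instead of connectProcessorAndStateStores.
    builder.addStateStore(new MockStateStoreSupplier("store-1", false), "processor-1");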
Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.
From the class TopologyBuilderTest, the method shouldAddInternalTopicConfigWithCompactForNonWindowStores.
@SuppressWarnings("unchecked")
@Test
public void shouldAddInternalTopicConfigWithCompactForNonWindowStores() throws Exception {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.setApplicationId("appId");
    builder.addSource("source", "topic");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addStateStore(new MockStateStoreSupplier("name", true), "processor");
    final Map<Integer, TopicsInfo> topicGroups = builder.topicGroups();
    final TopicsInfo topicsInfo = topicGroups.values().iterator().next();
    final InternalTopicConfig topicConfig = topicsInfo.stateChangelogTopics.get("appId-name-changelog");
    final Properties properties = topicConfig.toProperties(0);
    assertEquals("appId-name-changelog", topicConfig.name());
    assertEquals("compact", properties.getProperty(InternalTopicManager.CLEANUP_POLICY_PROP));
    assertEquals(1, properties.size());
}
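The expected changelog name "appId-name-changelog" follows the pattern applicationId + "-" + storeName + "-changelog", and the test asserts that a non-windowed store's changelog is configured with a compact cleanup policy. A minimal sketch of that naming convention, matching the values used above; changelogTopicName is a hypothetical helper name, not the Kafka-internal one:

    // Assumed convention, matching the expected value in the test:
    // "appId" + "-" + "name" + "-changelog" -> "appId-name-changelog".
    static String changelogTopicName(final String applicationId, final String storeName) {
        return applicationId + "-" + storeName + "-changelog";
    }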
Use of org.apache.kafka.test.MockProcessorSupplier in project kafka by apache.
From the class StreamPartitionAssignorTest, the method testAssignWithNewTasks.
@Test
public void testAssignWithNewTasks() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addSource("source3", "topic3");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2", "source3");
    List<String> topics = Utils.mkList("topic1", "topic2", "topic3");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2, task3);
    // assuming that the previous tasks do not cover topic3
    final Set<TaskId> prevTasks10 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks11 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks20 = Utils.mkSet(task2);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks10, Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks11, Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, prevTasks20, Collections.<TaskId>emptySet(), userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partitions: since there is no previous task for topic3, its tasks are placed without stickiness, so we cannot check for an exact match.
    // also note that previously assigned partitions / tasks may not stay on their previous host, since new tasks may be assigned first and
    // existing ones re-assigned to other hosts for load balancing.
    Set<TaskId> allActiveTasks = new HashSet<>();
    Set<TopicPartition> allPartitions = new HashSet<>();
    AssignmentInfo info;
    info = AssignmentInfo.decode(assignments.get("consumer10").userData());
    allActiveTasks.addAll(info.activeTasks);
    allPartitions.addAll(assignments.get("consumer10").partitions());
    info = AssignmentInfo.decode(assignments.get("consumer11").userData());
    allActiveTasks.addAll(info.activeTasks);
    allPartitions.addAll(assignments.get("consumer11").partitions());
    info = AssignmentInfo.decode(assignments.get("consumer20").userData());
    allActiveTasks.addAll(info.activeTasks);
    allPartitions.addAll(assignments.get("consumer20").partitions());
    assertEquals(allTasks, allActiveTasks);
    assertEquals(Utils.mkSet(t1p0, t1p1, t1p2, t2p0, t2p1, t2p2, t3p0, t3p1, t3p2, t3p3), allPartitions);
}
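The three decode/addAll blocks near the end repeat the same per-consumer aggregation; a readability sketch of the same collection written as a loop, using only the variables and calls already present in the test above:

    // Equivalent aggregation over the three consumers used in the test.
    for (final String consumer : Utils.mkList("consumer10", "consumer11", "consumer20")) {
        final AssignmentInfo consumerInfo = AssignmentInfo.decode(assignments.get(consumer).userData());
        allActiveTasks.addAll(consumerInfo.activeTasks);
        allPartitions.addAll(assignments.get(consumer).partitions());
    }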