Use of org.apache.kafka.test.MockProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class StreamsPartitionAssignorTest, method shouldMapUserEndPointToTopicPartitions.
@Test
public void shouldMapUserEndPointToTopicPartitions() throws Exception {
    builder.setApplicationId(applicationId);
    builder.addSource(null, "source", null, null, null, "topic1");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", null, null, null, "processor");
    final List<String> topics = Utils.mkList("topic1");
    final UUID uuid1 = UUID.randomUUID();
    mockTaskManager(Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), uuid1, builder);
    configurePartitionAssignor(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, (Object) userEndPoint));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamsConfig, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer1", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, emptyTasks, emptyTasks, userEndPoint).encode()));
    final Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumerAssignment = assignments.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumerAssignment.userData());
    final Set<TopicPartition> topicPartitions = assignmentInfo.partitionsByHost().get(new HostInfo("localhost", 8080));
    assertEquals(Utils.mkSet(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1), new TopicPartition("topic1", 2)), topicPartitions);
}
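The final lookup succeeds because HostInfo implements value equality on host and port, so the "localhost:8080" endpoint supplied via APPLICATION_SERVER_CONFIG resolves to the same map key as a freshly constructed HostInfo. A minimal standalone sketch of that mapping; the split-based parsing here is illustrative, not the exact internal code:

import org.apache.kafka.streams.state.HostInfo;

public class EndpointSketch {
    public static void main(String[] args) {
        // Assumption: userEndPoint holds the APPLICATION_SERVER_CONFIG value used in the test.
        final String userEndPoint = "localhost:8080";
        final String[] parts = userEndPoint.split(":");
        final HostInfo hostInfo = new HostInfo(parts[0], Integer.parseInt(parts[1]));
        // Value equality on host and port is what lets the test look up
        // partitionsByHost() with a freshly constructed HostInfo.
        System.out.println(hostInfo.equals(new HostInfo("localhost", 8080))); // true
    }
}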
Use of org.apache.kafka.test.MockProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class StreamsPartitionAssignorTest, method shouldAddUserDefinedEndPointToSubscription.
@Test
public void shouldAddUserDefinedEndPointToSubscription() throws Exception {
    builder.setApplicationId(applicationId);
    builder.addSource(null, "source", null, null, null, "input");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", null, null, null, "processor");
    final UUID uuid1 = UUID.randomUUID();
    mockTaskManager(Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), uuid1, builder);
    configurePartitionAssignor(Collections.singletonMap(StreamsConfig.APPLICATION_SERVER_CONFIG, (Object) userEndPoint));
    final PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("input"));
    final SubscriptionInfo subscriptionInfo = SubscriptionInfo.decode(subscription.userData());
    assertEquals("localhost:8080", subscriptionInfo.userEndPoint());
}
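The assertion relies on SubscriptionInfo surviving an encode/decode round trip through the subscription's userData. A minimal sketch of that round trip, assuming the same internal SubscriptionInfo class and the four-argument constructor used in these tests:

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.UUID;
import org.apache.kafka.streams.processor.TaskId;
import org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo;

public class SubscriptionRoundTripSketch {
    public static void main(String[] args) {
        final SubscriptionInfo original = new SubscriptionInfo(
            UUID.randomUUID(),                // process id
            Collections.<TaskId>emptySet(),   // previously active tasks
            Collections.<TaskId>emptySet(),   // previously assigned standby tasks
            "localhost:8080");                // user-defined end point
        final ByteBuffer wire = original.encode();  // what the consumer sends in its subscription
        final SubscriptionInfo decoded = SubscriptionInfo.decode(wire);
        System.out.println(decoded.userEndPoint()); // localhost:8080
    }
}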
Use of org.apache.kafka.test.MockProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class StreamsPartitionAssignorTest, method testAssignWithStandbyReplicas.
@Test
public void testAssignWithStandbyReplicas() throws Exception {
    Map<String, Object> props = configProps();
    props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
    StreamsConfig streamsConfig = new StreamsConfig(props);
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks00 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks01 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks01 = Utils.mkSet(task1);
    final Set<TaskId> standbyTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks00 = Utils.mkSet(task0);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    mockTaskManager(prevTasks00, standbyTasks01, uuid1, builder);
    configurePartitionAssignor(Collections.<String, Object>singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamsConfig, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks00, standbyTasks01, userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks01, standbyTasks02, userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, prevTasks02, standbyTasks00, "any:9097").encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    Set<TaskId> allActiveTasks = new HashSet<>();
    Set<TaskId> allStandbyTasks = new HashSet<>();
    // the first consumer
    AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks());
    allStandbyTasks.addAll(info10.standbyTasks().keySet());
    // the second consumer
    AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks());
    allStandbyTasks.addAll(info11.standbyTasks().keySet());
    assertNotEquals("same processId has same set of standby tasks", info11.standbyTasks().keySet(), info10.standbyTasks().keySet());
    // check active and standby tasks assigned to the first client
    assertEquals(Utils.mkSet(task0, task1), new HashSet<>(allActiveTasks));
    assertEquals(Utils.mkSet(task2), new HashSet<>(allStandbyTasks));
    // the third consumer
    AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks());
    allStandbyTasks.addAll(info20.standbyTasks().keySet());
    // all task ids are in the active tasks and also in the standby tasks
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
    assertEquals(3, allStandbyTasks.size());
    assertEquals(allTasks, allStandbyTasks);
}
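Outside of the test harness, the standby count exercised here is an ordinary StreamsConfig setting. A minimal sketch of enabling one standby replica in application code; the application id and broker address are placeholders:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class StandbyConfigSketch {
    public static void main(String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-app");         // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:9092"); // placeholder
        // One standby replica: each active task gets a warm copy of its state
        // stores maintained on another instance, which is what the assertions
        // above verify at the assignment level.
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        new StreamsConfig(props); // validates the configuration
    }
}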
Use of org.apache.kafka.test.MockProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class StreamsPartitionAssignorTest, method shouldAssignEvenlyAcrossConsumersOneClientMultipleThreads.
@Test
public void shouldAssignEvenlyAcrossConsumersOneClientMultipleThreads() throws Exception {
    builder.addSource(null, "source1", null, null, null, "topic1");
    builder.addSource(null, "source2", null, null, null, "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1");
    builder.addProcessor("processorII", new MockProcessorSupplier(), "source2");
    final List<PartitionInfo> localInfos = Arrays.asList(
        new PartitionInfo("topic1", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 2, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic1", 3, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 2, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("topic2", 3, Node.noNode(), new Node[0], new Node[0]));
    final Cluster localMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), localInfos, Collections.<String>emptySet(), Collections.<String>emptySet());
    final List<String> topics = Utils.mkList("topic1", "topic2");
    final TaskId taskIdA0 = new TaskId(0, 0);
    final TaskId taskIdA1 = new TaskId(0, 1);
    final TaskId taskIdA2 = new TaskId(0, 2);
    final TaskId taskIdA3 = new TaskId(0, 3);
    final TaskId taskIdB0 = new TaskId(1, 0);
    final TaskId taskIdB1 = new TaskId(1, 1);
    final TaskId taskIdB2 = new TaskId(1, 2);
    final TaskId taskIdB3 = new TaskId(1, 3);
    final UUID uuid1 = UUID.randomUUID();
    mockTaskManager(new HashSet<TaskId>(), new HashSet<TaskId>(), uuid1, builder);
    configurePartitionAssignor(Collections.<String, Object>emptyMap());
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamsConfig, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, new HashSet<TaskId>(), new HashSet<TaskId>(), userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, new HashSet<TaskId>(), new HashSet<TaskId>(), userEndPoint).encode()));
    final Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(localMetadata, subscriptions);
    // check assigned partitions
    assertEquals(
        Utils.mkSet(Utils.mkSet(t2p2, t1p0, t1p2, t2p0), Utils.mkSet(t1p1, t2p1, t1p3, t2p3)),
        Utils.mkSet(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));
    // the first consumer
    final AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    final List<TaskId> expectedInfo10TaskIds = Arrays.asList(taskIdA1, taskIdA3, taskIdB1, taskIdB3);
    assertEquals(expectedInfo10TaskIds, info10.activeTasks());
    // the second consumer
    final AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    final List<TaskId> expectedInfo11TaskIds = Arrays.asList(taskIdA0, taskIdA2, taskIdB0, taskIdB2);
    assertEquals(expectedInfo11TaskIds, info11.activeTasks());
}
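The TaskId pairs above follow the usual Streams convention: the first constructor argument is the sub-topology (topic group) id and the second is the partition. A small sketch of that, using only the public TaskId class:

import org.apache.kafka.streams.processor.TaskId;

public class TaskIdSketch {
    public static void main(String[] args) {
        final TaskId taskIdA1 = new TaskId(0, 1); // sub-topology 0, partition 1
        final TaskId taskIdB3 = new TaskId(1, 3); // sub-topology 1, partition 3
        // The string form "group_partition" is also used for state directory names.
        System.out.println(taskIdA1); // 0_1
        System.out.println(taskIdB3); // 1_3
    }
}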
Use of org.apache.kafka.test.MockProcessorSupplier in project apache-kafka-on-k8s by banzaicloud.
From the class KTableAggregateTest, method testAggCoalesced.
@Test
public void testAggCoalesced() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic1 = "topic1";
    final MockProcessorSupplier<String, String> proc = new MockProcessorSupplier<>();
    KTable<String, String> table1 = builder.table(topic1, consumed);
    KTable<String, String> table2 = table1
        .groupBy(MockMapper.<String, String>noOpKeyValueMapper(), stringSerialzied)
        .aggregate(
            MockInitializer.STRING_INIT,
            MockAggregator.TOSTRING_ADDER,
            MockAggregator.TOSTRING_REMOVER,
            stringSerde,
            "topic1-Canonized");
    table2.toStream().process(proc);
    driver.setUp(builder, stateDir);
    driver.process(topic1, "A", "1");
    driver.process(topic1, "A", "3");
    driver.process(topic1, "A", "4");
    driver.flushState();
    assertEquals(Utils.mkList("A:0+4"), proc.processed);
}
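The single "A:0+4" result reflects cache coalescing: the three updates to key "A" collapse in the KTable cache before the flush, so only the latest value (4) reaches the aggregator, which starts from the initializer's "0" and appends "+4". A plain-Java sketch of just the string mechanics, with no Kafka types involved:

public class CoalesceSketch {
    public static void main(String[] args) {
        String agg = "0";                        // what MockInitializer.STRING_INIT yields
        final String lastValueBeforeFlush = "4"; // "1" and "3" were overwritten in the cache
        agg = agg + "+" + lastValueBeforeFlush;  // what MockAggregator.TOSTRING_ADDER does
        System.out.println("A:" + agg);          // A:0+4
    }
}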