Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.
The class StreamPartitionAssignor, method onAssignment.
/**
 * @throws TaskAssignmentException if there is no task id for one of the partitions specified
 */
@Override
public void onAssignment(Assignment assignment) {
    List<TopicPartition> partitions = new ArrayList<>(assignment.partitions());
    Collections.sort(partitions, PARTITION_COMPARATOR);
    AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
    this.standbyTasks = info.standbyTasks;
    this.activeTasks = new HashMap<>();
    // the active task id list may contain duplicates, since one task can have
    // more than one assigned partition
    if (partitions.size() != info.activeTasks.size()) {
        throw new TaskAssignmentException(String.format(
            "stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d"
                + ", assignmentInfo=%s",
            streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()));
    }
    for (int i = 0; i < partitions.size(); i++) {
        TopicPartition partition = partitions.get(i);
        TaskId id = info.activeTasks.get(i);
        Set<TopicPartition> assignedPartitions = activeTasks.get(id);
        if (assignedPartitions == null) {
            assignedPartitions = new HashSet<>();
            activeTasks.put(id, assignedPartitions);
        }
        assignedPartitions.add(partition);
    }
    this.partitionsByHostState = info.partitionsByHost;
    final Collection<Set<TopicPartition>> values = partitionsByHostState.values();
    final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>();
    for (Set<TopicPartition> value : values) {
        for (TopicPartition topicPartition : value) {
            topicToPartitionInfo.put(topicPartition,
                new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0]));
        }
    }
    metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo);
}
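The method above only reads back an AssignmentInfo that the group leader encoded into the assignment's userData. As a minimal standalone sketch of that encode/decode round trip (the task ids, partitions, and host below are made-up values, and the same imports as the surrounding snippets are assumed):

// build an AssignmentInfo with hypothetical contents
List<TaskId> activeTaskIds = Utils.mkList(new TaskId(0, 0), new TaskId(0, 1));
Map<TaskId, Set<TopicPartition>> standby = new HashMap<>();
standby.put(new TaskId(0, 2), Utils.mkSet(new TopicPartition("topic1", 2)));
Map<HostInfo, Set<TopicPartition>> hostState = new HashMap<>();
hostState.put(new HostInfo("localhost", 8080), Utils.mkSet(new TopicPartition("topic1", 0)));
AssignmentInfo original = new AssignmentInfo(activeTaskIds, standby, hostState);

// the leader ships encode()'s ByteBuffer as the assignment userData;
// onAssignment() recovers the same structures via decode()
ByteBuffer userData = original.encode();
AssignmentInfo decoded = AssignmentInfo.decode(userData);
// decoded.activeTasks, decoded.standbyTasks and decoded.partitionsByHost
// now mirror the values built above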
Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.
The class StreamPartitionAssignorTest, method testOnAssignment.
@Test
public void testOnAssignment() throws Exception {
    TopicPartition t2p3 = new TopicPartition("topic2", 3);
    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    UUID uuid = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread, "test", client1));
    List<TaskId> activeTaskList = Utils.mkList(task0, task3);
    Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    activeTasks.put(task0, Utils.mkSet(t1p0));
    activeTasks.put(task3, Utils.mkSet(t2p3));
    standbyTasks.put(task1, Utils.mkSet(t1p0));
    standbyTasks.put(task2, Utils.mkSet(t2p0));
    AssignmentInfo info = new AssignmentInfo(activeTaskList, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>());
    PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(Utils.mkList(t1p0, t2p3), info.encode());
    partitionAssignor.onAssignment(assignment);
    assertEquals(activeTasks, partitionAssignor.activeTasks());
    assertEquals(standbyTasks, partitionAssignor.standbyTasks());
}
Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.
The class StreamPartitionAssignorTest, method testAssignWithStandbyReplicas.
@Test
public void testAssignWithStandbyReplicas() throws Exception {
    Properties props = configProps();
    props.setProperty(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
    StreamsConfig config = new StreamsConfig(props);
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks00 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks01 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks01 = Utils.mkSet(task1);
    final Set<TaskId> standbyTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks00 = Utils.mkSet(task0);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10",
        new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks00, standbyTasks01, userEndPoint).encode()));
    subscriptions.put("consumer11",
        new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks01, standbyTasks02, userEndPoint).encode()));
    subscriptions.put("consumer20",
        new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, prevTasks02, standbyTasks00, "any:9097").encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    Set<TaskId> allActiveTasks = new HashSet<>();
    Set<TaskId> allStandbyTasks = new HashSet<>();
    // the first consumer
    AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    allStandbyTasks.addAll(info10.standbyTasks.keySet());
    // the second consumer
    AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks);
    allStandbyTasks.addAll(info11.standbyTasks.keySet());
    assertNotEquals("same processId has same set of standby tasks", info11.standbyTasks.keySet(), info10.standbyTasks.keySet());
    // check active tasks assigned to the first client
    assertEquals(Utils.mkSet(task0, task1), new HashSet<>(allActiveTasks));
    assertEquals(Utils.mkSet(task2), new HashSet<>(allStandbyTasks));
    // the third consumer
    AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks);
    allStandbyTasks.addAll(info20.standbyTasks.keySet());
    // all task ids are in the active tasks and also in the standby tasks
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
    assertEquals(3, allStandbyTasks.size());
    assertEquals(allTasks, allStandbyTasks);
}
Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.
The class StreamPartitionAssignorTest, method shouldExposeHostStateToTopicPartitionsOnAssignment.
@Test
public void shouldExposeHostStateToTopicPartitionsOnAssignment() throws Exception {
    List<TopicPartition> topic = Collections.singletonList(new TopicPartition("topic", 0));
    final Map<HostInfo, Set<TopicPartition>> hostState = Collections.singletonMap(
        new HostInfo("localhost", 80), Collections.singleton(new TopicPartition("topic", 0)));
    AssignmentInfo assignmentInfo = new AssignmentInfo(Collections.singletonList(new TaskId(0, 0)),
        Collections.<TaskId, Set<TopicPartition>>emptyMap(), hostState);
    partitionAssignor.onAssignment(new PartitionAssignor.Assignment(topic, assignmentInfo.encode()));
    assertEquals(hostState, partitionAssignor.getPartitionsByHostState());
}
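The host-state map this test checks via getPartitionsByHostState() is the same structure that onAssignment() flattens into Cluster metadata. A small sketch of that flattening step, using a hypothetical host and partition rather than values taken from the test, with the same imports assumed:

Map<HostInfo, Set<TopicPartition>> byHost = Collections.singletonMap(
    new HostInfo("localhost", 80), Collections.singleton(new TopicPartition("topic", 0)));
Map<TopicPartition, PartitionInfo> partitionInfo = new HashMap<>();
for (Set<TopicPartition> tps : byHost.values()) {
    for (TopicPartition tp : tps) {
        // leader and replica nodes are unknown at this point, so they are left empty
        partitionInfo.put(tp, new PartitionInfo(tp.topic(), tp.partition(), null, new Node[0], new Node[0]));
    }
}
Cluster metadata = Cluster.empty().withPartitions(partitionInfo);
// the rebuilt metadata now knows about partition 0 of "topic"
List<PartitionInfo> rebuilt = metadata.partitionsForTopic("topic");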
Use of org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo in project kafka by apache.
The class StreamPartitionAssignorTest, method checkAssignment.
private AssignmentInfo checkAssignment(Set<String> expectedTopics, PartitionAssignor.Assignment assignment) {
    // This assumes 1) the DefaultPartitionGrouper is used, and 2) there is only one topic group.
    AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
    // check that the number of assigned partitions == the size of the active task id list
    assertEquals(assignment.partitions().size(), info.activeTasks.size());
    // check that the active tasks are consistent
    List<TaskId> activeTasks = new ArrayList<>();
    Set<String> activeTopics = new HashSet<>();
    for (TopicPartition partition : assignment.partitions()) {
        // with the default grouper, taskId.partition == partition.partition()
        activeTasks.add(new TaskId(0, partition.partition()));
        activeTopics.add(partition.topic());
    }
    assertEquals(activeTasks, info.activeTasks);
    // check that the active partitions cover all topics
    assertEquals(expectedTopics, activeTopics);
    // check that the standby tasks are consistent
    Set<String> standbyTopics = new HashSet<>();
    for (Map.Entry<TaskId, Set<TopicPartition>> entry : info.standbyTasks.entrySet()) {
        TaskId id = entry.getKey();
        Set<TopicPartition> partitions = entry.getValue();
        for (TopicPartition partition : partitions) {
            // with the default grouper, taskId.partition == partition.partition()
            assertEquals(id.partition, partition.partition());
            standbyTopics.add(partition.topic());
        }
    }
    if (info.standbyTasks.size() > 0) {
        // check that the standby partitions cover all topics
        assertEquals(expectedTopics, standbyTopics);
    }
    return info;
}
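As a usage sketch, the helper can also be driven directly with a hand-built single-partition assignment. The values below are hypothetical, and the code assumes it lives inside the same test class so that checkAssignment, Utils, and the JUnit assertions are in scope:

// encode an AssignmentInfo, wrap it in an Assignment, then let the helper decode and verify it
AssignmentInfo expected = new AssignmentInfo(
    Utils.mkList(new TaskId(0, 0)),
    Collections.<TaskId, Set<TopicPartition>>emptyMap(),
    Collections.<HostInfo, Set<TopicPartition>>emptyMap());
PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(
    Utils.mkList(new TopicPartition("topic1", 0)), expected.encode());
AssignmentInfo decoded = checkAssignment(Utils.mkSet("topic1"), assignment);
// the helper returns the decoded info, so callers can make further assertions on it
assertEquals(expected.activeTasks, decoded.activeTasks);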