Example usage of org.apache.kafka.streams.state.HostInfo in the Apache Kafka project:
class StreamsPartitionAssignorTest, method testOnAssignment.
@Test
public void testOnAssignment() {
    // Verifies that StreamsPartitionAssignor.onAssignment() decodes the serialized
    // AssignmentInfo and propagates it to its collaborators:
    //   (a) the active/standby task-to-partition maps go to TaskManager.handleAssignment(), and
    //   (b) the host state plus a Cluster rebuilt from the assignment's partition info
    //       go to StreamsMetadataState.onChange().
    taskManager = EasyMock.createStrictMock(TaskManager.class);
    // One host owning the two assigned partitions of topic t3.
    final Map<HostInfo, Set<TopicPartition>> hostState = Collections.singletonMap(new HostInfo("localhost", 9090), mkSet(t3p0, t3p3));
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(TASK_0_0, mkSet(t3p0));
    activeTasks.put(TASK_0_3, mkSet(t3p3));
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    standbyTasks.put(TASK_0_1, mkSet(t3p1));
    standbyTasks.put(TASK_0_2, mkSet(t3p2));
    // Record the expected TaskManager interaction.
    // NOTE(review): taskManager is never replay()-ed in this method; presumably
    // configureDefaultPartitionAssignor() replays it — confirm against the test fixture.
    taskManager.handleAssignment(activeTasks, standbyTasks);
    EasyMock.expectLastCall();
    streamsMetadataState = EasyMock.createStrictMock(StreamsMetadataState.class);
    // Capture the Cluster passed to onChange() so its contents can be asserted below.
    final Capture<Cluster> capturedCluster = EasyMock.newCapture();
    streamsMetadataState.onChange(EasyMock.eq(hostState), EasyMock.anyObject(), EasyMock.capture(capturedCluster));
    EasyMock.expectLastCall();
    EasyMock.replay(streamsMetadataState);
    configureDefaultPartitionAssignor();
    final List<TaskId> activeTaskList = asList(TASK_0_0, TASK_0_3);
    final AssignmentInfo info = new AssignmentInfo(LATEST_SUPPORTED_VERSION, activeTaskList, standbyTasks, hostState, emptyMap(), 0);
    final Assignment assignment = new Assignment(asList(t3p0, t3p3), info.encode());
    // Exercise: decode the assignment and fan it out.
    partitionAssignor.onAssignment(assignment, null);
    EasyMock.verify(streamsMetadataState);
    EasyMock.verify(taskManager);
    // The rebuilt Cluster should contain exactly the assigned topic with both partitions.
    assertEquals(singleton(t3p0.topic()), capturedCluster.getValue().topics());
    assertEquals(2, capturedCluster.getValue().partitionsForTopic(t3p0.topic()).size());
}
Example usage of org.apache.kafka.streams.state.HostInfo in the Apache Kafka project:
class StreamsMetadataStateTest, method shouldGetInstancesForStoreName.
@Test
public void shouldGetInstancesForStoreName() {
    // "table-one" is hosted actively on hostOne and hostTwo and as a standby on
    // hostThree, so the store lookup must return all three instances.
    final StreamsMetadata expectedOnHostOne = new StreamsMetadataImpl(hostOne, mkSet(globalTable, "table-one", "table-two", "merged-table"), mkSet(topic1P0, topic2P1, topic4P0), mkSet("table-one", "table-two", "merged-table"), mkSet(topic2P0, topic1P1));
    final StreamsMetadata expectedOnHostTwo = new StreamsMetadataImpl(hostTwo, mkSet(globalTable, "table-two", "table-one", "merged-table"), mkSet(topic2P0, topic1P1), mkSet("table-three"), mkSet(topic3P0));

    final Collection<StreamsMetadata> allForStore = metadataState.getAllMetadataForStore("table-one");

    // Index the results by host so the standby entry for hostThree can be inspected directly.
    final Map<HostInfo, StreamsMetadata> byHost = new HashMap<>();
    for (final StreamsMetadata metadata : allForStore) {
        byHost.put(metadata.hostInfo(), metadata);
    }

    assertEquals(3, allForStore.size());
    assertTrue("expected " + allForStore + " to contain " + expectedOnHostOne, allForStore.contains(expectedOnHostOne));
    assertTrue("expected " + allForStore + " to contain " + expectedOnHostTwo, allForStore.contains(expectedOnHostTwo));
    assertTrue("expected " + hostThree + " to contain as standby", byHost.get(hostThree).standbyStateStoreNames().contains("table-one"));
}
Example usage of org.apache.kafka.streams.state.HostInfo in the Apache Kafka project:
class StreamsMetadataStateTest, method shouldGetAllStreamsInstancesWithNoStores.
@Test
public void shouldGetAllStreamsInstancesWithNoStores() {
    // A stateless sub-topology on a brand-new host must still show up in
    // getAllMetadata(), just with empty store-name sets.
    builder.stream("topic-five").filter((key, value) -> true).to("some-other-topic");

    final TopicPartition topicFivePartition = new TopicPartition("topic-five", 1);
    final HostInfo statelessHost = new HostInfo("host-four", 8080);
    hostToActivePartitions.put(statelessHost, mkSet(topicFivePartition));

    final PartitionInfo topicFiveInfo = new PartitionInfo("topic-five", 1, null, null, null);
    final Cluster updatedCluster = cluster.withPartitions(Collections.singletonMap(topicFivePartition, topicFiveInfo));
    metadataState.onChange(hostToActivePartitions, Collections.emptyMap(), updatedCluster);

    // Only the global table is visible on the new host; no local stores, no standbys.
    final StreamsMetadata expectedForStatelessHost = new StreamsMetadataImpl(statelessHost, Collections.singleton(globalTable), Collections.singleton(topicFivePartition), Collections.emptySet(), Collections.emptySet());
    final Collection<StreamsMetadata> allMetadata = metadataState.getAllMetadata();
    assertTrue("expected " + allMetadata + " to contain " + expectedForStatelessHost, allMetadata.contains(expectedForStatelessHost));
}
Example usage of org.apache.kafka.streams.state.HostInfo in the Apache Kafka project:
class StreamsMetadataState, method rebuildMetadataForSingleTopology.
/**
 * Rebuilds the per-host {@code StreamsMetadata} for an application with a single
 * (unnamed) topology. Hosts are the union of the active and standby host maps,
 * visited in a deterministic order (host name, then port). Global stores are
 * considered active on every host. Also refreshes {@code localMetadata} when this
 * host appears in the union.
 */
private List<StreamsMetadata> rebuildMetadataForSingleTopology(final Map<HostInfo, Set<TopicPartition>> activePartitionHostMap, final Map<HostInfo, Set<TopicPartition>> standbyPartitionHostMap) {
    final List<StreamsMetadata> rebuiltMetadata = new ArrayList<>();
    final Map<String, List<String>> storeToSourceTopics = topologyMetadata.stateStoreNameToSourceTopics();
    // Deterministic host ordering keeps the rebuilt metadata list stable across rebuilds.
    final Comparator<HostInfo> byHostThenPort = Comparator.comparing(HostInfo::host).thenComparingInt(HostInfo::port);
    Stream.concat(activePartitionHostMap.keySet().stream(), standbyPartitionHostMap.keySet().stream())
        .distinct()
        .sorted(byHostThenPort)
        .forEach(host -> {
            final Set<TopicPartition> activePartitions = new HashSet<>();
            final Set<String> activeStores = new HashSet<>();
            if (activePartitionHostMap.containsKey(host)) {
                activePartitions.addAll(activePartitionHostMap.get(host));
                activeStores.addAll(getStoresOnHost(storeToSourceTopics, activePartitions));
            }
            // Global stores are replicated everywhere, so every host serves them actively.
            activeStores.addAll(globalStores);

            final Set<TopicPartition> standbyPartitions = new HashSet<>();
            final Set<String> standbyStores = new HashSet<>();
            if (standbyPartitionHostMap.containsKey(host)) {
                standbyPartitions.addAll(standbyPartitionHostMap.get(host));
                standbyStores.addAll(getStoresOnHost(storeToSourceTopics, standbyPartitions));
            }

            final StreamsMetadata metadata = new StreamsMetadataImpl(host, activeStores, activePartitions, standbyStores, standbyPartitions);
            rebuiltMetadata.add(metadata);
            if (host.equals(thisHost)) {
                localMetadata.set(metadata);
            }
        });
    return rebuiltMetadata;
}
Example usage of org.apache.kafka.streams.state.HostInfo in the Apache Kafka project:
class StreamsMetadataState, method rebuildMetadataForNamedTopologies.
/**
 * Rebuilds the per-host, per-named-topology {@code StreamsMetadata}. For each host in
 * the union of the active and standby host maps (visited in deterministic host/port
 * order) and each named topology, partitions are filtered down to the topics belonging
 * to that topology; hosts with no tasks for a topology get no metadata entry for it.
 * Afterwards {@code localMetadata} is refreshed with a single cross-topology view of
 * this host.
 *
 * Fix: the cross-topology {@code localMetadata} construction used to live inside the
 * per-host forEach, recomputing an identical value once per host (it depends only on
 * {@code thisHost}) and clobbering the per-topology value on every iteration. It is now
 * computed exactly once, after the host loop, guarded so that {@code localMetadata} is
 * still left untouched when there are no hosts at all — the observable final state is
 * unchanged.
 */
private List<StreamsMetadata> rebuildMetadataForNamedTopologies(final Map<HostInfo, Set<TopicPartition>> activePartitionHostMap, final Map<HostInfo, Set<TopicPartition>> standbyPartitionHostMap) {
    final List<StreamsMetadata> rebuiltMetadata = new ArrayList<>();
    Stream.concat(activePartitionHostMap.keySet().stream(), standbyPartitionHostMap.keySet().stream())
        .distinct()
        .sorted(Comparator.comparing(HostInfo::host).thenComparingInt(HostInfo::port))
        .forEach(hostInfo -> {
            for (final String topologyName : topologyMetadata.namedTopologiesView()) {
                final Map<String, List<String>> storeToSourceTopics = topologyMetadata.stateStoreNameToSourceTopicsForTopology(topologyName);
                final Set<TopicPartition> activePartitionsOnHost = new HashSet<>();
                final Set<String> activeStoresOnHost = new HashSet<>();
                if (activePartitionHostMap.containsKey(hostInfo)) {
                    // filter out partitions for topics that are not connected to this topology
                    activePartitionsOnHost.addAll(activePartitionHostMap.get(hostInfo).stream().filter(tp -> topologyMetadata.fullSourceTopicNamesForTopology(topologyName).contains(tp.topic())).collect(Collectors.toSet()));
                    activeStoresOnHost.addAll(getStoresOnHost(storeToSourceTopics, activePartitionsOnHost));
                }
                // TODO KAFKA-13281: when we add support for global stores with named topologies we will
                // need to add the global stores to the activeStoresOnHost set
                final Set<TopicPartition> standbyPartitionsOnHost = new HashSet<>();
                final Set<String> standbyStoresOnHost = new HashSet<>();
                if (standbyPartitionHostMap.containsKey(hostInfo)) {
                    standbyPartitionsOnHost.addAll(standbyPartitionHostMap.get(hostInfo).stream().filter(tp -> topologyMetadata.fullSourceTopicNamesForTopology(topologyName).contains(tp.topic())).collect(Collectors.toSet()));
                    standbyStoresOnHost.addAll(getStoresOnHost(storeToSourceTopics, standbyPartitionsOnHost));
                }
                // Only build metadata for (host, topology) pairs that actually have tasks.
                if (!(activeStoresOnHost.isEmpty() && activePartitionsOnHost.isEmpty() && standbyStoresOnHost.isEmpty() && standbyPartitionsOnHost.isEmpty())) {
                    final StreamsMetadata metadata = new StreamsMetadataImpl(hostInfo, activeStoresOnHost, activePartitionsOnHost, standbyStoresOnHost, standbyPartitionsOnHost, topologyName);
                    rebuiltMetadata.add(metadata);
                } else {
                    log.debug("Host {} has no tasks for topology {} at the moment, this metadata will not be built", hostInfo, topologyName);
                }
            }
        });
    // Construct metadata across all topologies on this host for the `localMetadata` field.
    // Guarded so localMetadata is only set when at least one host was present, matching
    // the original behavior (the union of the two key sets is non-empty iff either map is).
    if (!activePartitionHostMap.isEmpty() || !standbyPartitionHostMap.isEmpty()) {
        final Map<String, List<String>> storeToSourceTopics = topologyMetadata.stateStoreNameToSourceTopics();
        // NOTE(review): if thisHost is absent from these maps, the gets below yield null,
        // which is then passed to getStoresOnHost — the original code did the same; confirm
        // that getStoresOnHost and StreamsMetadataImpl tolerate null partition sets.
        final Set<TopicPartition> localActivePartitions = activePartitionHostMap.get(thisHost);
        final Set<TopicPartition> localStandbyPartitions = standbyPartitionHostMap.get(thisHost);
        localMetadata.set(new StreamsMetadataImpl(thisHost, getStoresOnHost(storeToSourceTopics, localActivePartitions), localActivePartitions, getStoresOnHost(storeToSourceTopics, localStandbyPartitions), localStandbyPartitions));
    }
    return rebuiltMetadata;
}
Aggregations