Use of org.apache.kafka.streams.StreamsMetadata in project kafka by apache.
From the class StreamsMetadataState, method rebuildMetadataForNamedTopologies.
private List<StreamsMetadata> rebuildMetadataForNamedTopologies(final Map<HostInfo, Set<TopicPartition>> activePartitionHostMap,
                                                                final Map<HostInfo, Set<TopicPartition>> standbyPartitionHostMap) {
    final List<StreamsMetadata> rebuiltMetadata = new ArrayList<>();
    Stream.concat(activePartitionHostMap.keySet().stream(), standbyPartitionHostMap.keySet().stream())
        .distinct()
        .sorted(Comparator.comparing(HostInfo::host).thenComparingInt(HostInfo::port))
        .forEach(hostInfo -> {
            for (final String topologyName : topologyMetadata.namedTopologiesView()) {
                final Map<String, List<String>> storeToSourceTopics =
                    topologyMetadata.stateStoreNameToSourceTopicsForTopology(topologyName);

                final Set<TopicPartition> activePartitionsOnHost = new HashSet<>();
                final Set<String> activeStoresOnHost = new HashSet<>();
                if (activePartitionHostMap.containsKey(hostInfo)) {
                    // filter out partitions for topics that are not connected to this topology
                    activePartitionsOnHost.addAll(
                        activePartitionHostMap.get(hostInfo).stream()
                            .filter(tp -> topologyMetadata.fullSourceTopicNamesForTopology(topologyName).contains(tp.topic()))
                            .collect(Collectors.toSet()));
                    activeStoresOnHost.addAll(getStoresOnHost(storeToSourceTopics, activePartitionsOnHost));
                }
                // TODO KAFKA-13281: when we add support for global stores with named topologies we will
                //  need to add the global stores to the activeStoresOnHost set

                final Set<TopicPartition> standbyPartitionsOnHost = new HashSet<>();
                final Set<String> standbyStoresOnHost = new HashSet<>();
                if (standbyPartitionHostMap.containsKey(hostInfo)) {
                    standbyPartitionsOnHost.addAll(
                        standbyPartitionHostMap.get(hostInfo).stream()
                            .filter(tp -> topologyMetadata.fullSourceTopicNamesForTopology(topologyName).contains(tp.topic()))
                            .collect(Collectors.toSet()));
                    standbyStoresOnHost.addAll(getStoresOnHost(storeToSourceTopics, standbyPartitionsOnHost));
                }

                if (!(activeStoresOnHost.isEmpty() && activePartitionsOnHost.isEmpty()
                        && standbyStoresOnHost.isEmpty() && standbyPartitionsOnHost.isEmpty())) {
                    final StreamsMetadata metadata = new StreamsMetadataImpl(
                        hostInfo,
                        activeStoresOnHost,
                        activePartitionsOnHost,
                        standbyStoresOnHost,
                        standbyPartitionsOnHost,
                        topologyName);
                    rebuiltMetadata.add(metadata);
                    if (hostInfo.equals(thisHost)) {
                        localMetadata.set(metadata);
                    }
                } else {
                    log.debug("Host {} has no tasks for topology {} at the moment, this metadata will not be built", hostInfo, topologyName);
                }
            }
            // Construct metadata across all topologies on this host for the `localMetadata` field
            final Map<String, List<String>> storeToSourceTopics = topologyMetadata.stateStoreNameToSourceTopics();
            final Set<TopicPartition> localActivePartitions = activePartitionHostMap.get(thisHost);
            final Set<TopicPartition> localStandbyPartitions = standbyPartitionHostMap.get(thisHost);

            localMetadata.set(new StreamsMetadataImpl(
                thisHost,
                getStoresOnHost(storeToSourceTopics, localActivePartitions),
                localActivePartitions,
                getStoresOnHost(storeToSourceTopics, localStandbyPartitions),
                localStandbyPartitions));
        });
    return rebuiltMetadata;
}
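The getStoresOnHost helper called above is not included in this snippet. A minimal sketch of what it might look like, assuming a store counts as present on a host whenever one of the host's partitions belongs to one of that store's source topics (the null guard covers the localMetadata path, where the map lookups may return null):

// Hypothetical sketch of the getStoresOnHost helper referenced above, not the
// actual Kafka implementation: a store is considered present on the host if any
// of the host's partitions belongs to one of that store's source topics.
private Set<String> getStoresOnHost(final Map<String, List<String>> storeToSourceTopics,
                                    final Set<TopicPartition> sourceTopicPartitions) {
    final Set<String> storesOnHost = new HashSet<>();
    if (sourceTopicPartitions == null) {
        return storesOnHost;
    }
    for (final Map.Entry<String, List<String>> storeTopicEntry : storeToSourceTopics.entrySet()) {
        final List<String> topicsForStore = storeTopicEntry.getValue();
        for (final TopicPartition topicPartition : sourceTopicPartitions) {
            if (topicsForStore.contains(topicPartition.topic())) {
                storesOnHost.add(storeTopicEntry.getKey());
            }
        }
    }
    return storesOnHost;
}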
Use of org.apache.kafka.streams.StreamsMetadata in project kafka by apache.
From the class StreamsMetadataTest, method shouldNotBeEqualIfDifferInStandByStores.
@Test
public void shouldNotBeEqualIfDifferInStandByStores() {
    final StreamsMetadata differStandByStores = new StreamsMetadataImpl(
        HOST_INFO, STATE_STORE_NAMES, TOPIC_PARTITIONS, mkSet("store1"), STANDBY_TOPIC_PARTITIONS);
    assertThat(streamsMetadata, not(equalTo(differStandByStores)));
    assertThat(streamsMetadata.hashCode(), not(equalTo(differStandByStores.hashCode())));
}
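The streamsMetadata fixture and the HOST_INFO, STATE_STORE_NAMES, TOPIC_PARTITIONS, STAND_BY_STORE_NAMES, and STANDBY_TOPIC_PARTITIONS constants compared against here (and in the tests below) are defined elsewhere in StreamsMetadataTest. A minimal sketch of such a setup, with illustrative values that are assumptions rather than the actual constants from the test class:

// Illustrative fixture setup; the concrete values below are assumptions chosen
// to be consistent with the assertions in these tests, not copied from
// StreamsMetadataTest.
private static final HostInfo HOST_INFO = new HostInfo("localhost", 9090);
private static final Set<String> STATE_STORE_NAMES = mkSet("store1", "store2");
private static final TopicPartition TP_0 = new TopicPartition("topic", 0);
private static final TopicPartition TP_1 = new TopicPartition("topic", 1);
private static final Set<TopicPartition> TOPIC_PARTITIONS = mkSet(TP_0, TP_1);
private static final Set<String> STAND_BY_STORE_NAMES = mkSet("store2");
private static final Set<TopicPartition> STANDBY_TOPIC_PARTITIONS = mkSet(TP_1);

private StreamsMetadata streamsMetadata;

@Before
public void setUp() {
    streamsMetadata = new StreamsMetadataImpl(
        HOST_INFO, STATE_STORE_NAMES, TOPIC_PARTITIONS, STAND_BY_STORE_NAMES, STANDBY_TOPIC_PARTITIONS);
}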
Use of org.apache.kafka.streams.StreamsMetadata in project kafka by apache.
From the class StreamsMetadataTest, method shouldNotBeEqualIfDifferInTopicPartitions.
@Test
public void shouldNotBeEqualIfDifferInTopicPartitions() {
    final StreamsMetadata differTopicPartitions = new StreamsMetadataImpl(
        HOST_INFO, STATE_STORE_NAMES, mkSet(TP_0), STAND_BY_STORE_NAMES, STANDBY_TOPIC_PARTITIONS);
    assertThat(streamsMetadata, not(equalTo(differTopicPartitions)));
    assertThat(streamsMetadata.hashCode(), not(equalTo(differTopicPartitions.hashCode())));
}
Use of org.apache.kafka.streams.StreamsMetadata in project kafka by apache.
From the class StreamsMetadataTest, method shouldBeEqualsIfSameObject.
@Test
public void shouldBeEqualsIfSameObject() {
    final StreamsMetadata same = new StreamsMetadataImpl(
        HOST_INFO, STATE_STORE_NAMES, TOPIC_PARTITIONS, STAND_BY_STORE_NAMES, STANDBY_TOPIC_PARTITIONS);
    assertThat(streamsMetadata, equalTo(same));
    assertThat(streamsMetadata.hashCode(), equalTo(same.hashCode()));
}
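Taken together, these tests pin down the equals/hashCode contract of StreamsMetadataImpl: two instances are equal only when the host and all four store/partition collections match. A minimal sketch of an equals/hashCode pair consistent with these tests (an assumption, not the class's actual code):

// Hedged sketch of an equals/hashCode pair consistent with the tests above:
// value equality over the host plus all four store/partition collections.
@Override
public boolean equals(final Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof StreamsMetadataImpl)) {
        return false;
    }
    final StreamsMetadataImpl that = (StreamsMetadataImpl) o;
    return Objects.equals(hostInfo, that.hostInfo)
        && Objects.equals(stateStoreNames, that.stateStoreNames)
        && Objects.equals(topicPartitions, that.topicPartitions)
        && Objects.equals(standbyStateStoreNames, that.standbyStateStoreNames)
        && Objects.equals(standbyTopicPartitions, that.standbyTopicPartitions);
}

@Override
public int hashCode() {
    return Objects.hash(hostInfo, stateStoreNames, topicPartitions, standbyStateStoreNames, standbyTopicPartitions);
}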
Use of org.apache.kafka.streams.StreamsMetadata in project kafka by apache.
From the class NamedTopologyIntegrationTest, method shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology.
@Test
public void shouldAddAndRemoveNamedTopologiesBeforeStartingAndRouteQueriesToCorrectTopology() throws Exception {
    try {
        // For this test, one of the topologies reads from an input topic with just one partition, so
        // there is only one instance of that topology's store; any method that looks up all hosts with
        // a specific store and topology should therefore return exactly one StreamsMetadata
        CLUSTER.createTopic(SINGLE_PARTITION_INPUT_STREAM, 1, 1);
        CLUSTER.createTopic(SINGLE_PARTITION_OUTPUT_STREAM, 1, 1);
        produceToInputTopics(SINGLE_PARTITION_INPUT_STREAM, STANDARD_INPUT_DATA);

        final String topology1Store = "store-" + TOPOLOGY_1;
        final String topology2Store = "store-" + TOPOLOGY_2;
        topology1Builder.stream(INPUT_STREAM_1)
            .groupBy((k, v) -> k)
            .count(Materialized.as(topology1Store))
            .toStream()
            .to(OUTPUT_STREAM_1);
        topology2Builder.stream(SINGLE_PARTITION_INPUT_STREAM)
            .groupByKey()
            .count(Materialized.as(topology2Store))
            .toStream()
            .to(SINGLE_PARTITION_OUTPUT_STREAM);

        streams.addNamedTopology(topology1Builder.build());
        streams.removeNamedTopology(TOPOLOGY_1);
        assertThat(streams.getTopologyByName(TOPOLOGY_1), is(Optional.empty()));
        streams.addNamedTopology(topology1Builder.build());
        streams.addNamedTopology(topology2Builder.build());
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(15));

        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, OUTPUT_STREAM_1, 3), equalTo(COUNT_OUTPUT_DATA));
        assertThat(waitUntilMinKeyValueRecordsReceived(consumerConfig, SINGLE_PARTITION_OUTPUT_STREAM, 3), equalTo(COUNT_OUTPUT_DATA));

        final ReadOnlyKeyValueStore<String, Long> store = streams.store(
            NamedTopologyStoreQueryParameters.fromNamedTopologyAndStoreNameAndType(
                TOPOLOGY_1, topology1Store, QueryableStoreTypes.keyValueStore()));
        assertThat(store.get("A"), equalTo(2L));

        final Collection<StreamsMetadata> streamsMetadata = streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1);
        final Collection<StreamsMetadata> streamsMetadata2 = streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2);
        assertThat(streamsMetadata.size(), equalTo(1));
        assertThat(streamsMetadata2.size(), equalTo(1));

        final KeyQueryMetadata keyMetadata = streams.queryMetadataForKey(topology1Store, "A", new StringSerializer(), TOPOLOGY_1);
        final KeyQueryMetadata keyMetadata2 = streams.queryMetadataForKey(topology2Store, "A", new StringSerializer(), TOPOLOGY_2);
        assertThat(keyMetadata, not(NOT_AVAILABLE));
        assertThat(keyMetadata, equalTo(keyMetadata2));

        final Map<String, Map<Integer, LagInfo>> partitionLags1 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_1);
        final Map<String, Map<Integer, LagInfo>> partitionLags2 = streams.allLocalStorePartitionLagsForTopology(TOPOLOGY_2);
        assertThat(partitionLags1.keySet(), equalTo(singleton(topology1Store)));
        assertThat(partitionLags1.get(topology1Store).keySet(), equalTo(mkSet(0, 1)));
        assertThat(partitionLags2.keySet(), equalTo(singleton(topology2Store)));
        // only one copy of the store in topology-2
        assertThat(partitionLags2.get(topology2Store).keySet(), equalTo(singleton(0)));

        // Start up a second node with both topologies
        setupSecondKafkaStreams();
        topology1Builder2.stream(INPUT_STREAM_1)
            .groupBy((k, v) -> k)
            .count(Materialized.as(topology1Store))
            .toStream()
            .to(OUTPUT_STREAM_1);
        topology2Builder2.stream(SINGLE_PARTITION_INPUT_STREAM)
            .groupByKey()
            .count(Materialized.as(topology2Store))
            .toStream()
            .to(SINGLE_PARTITION_OUTPUT_STREAM);
        streams2.start(asList(topology1Builder2.build(), topology2Builder2.build()));
        waitForApplicationState(asList(streams, streams2), State.RUNNING, Duration.ofSeconds(30));

        verifyMetadataForTopology(TOPOLOGY_1,
            streams.streamsMetadataForStore(topology1Store, TOPOLOGY_1),
            streams2.streamsMetadataForStore(topology1Store, TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2,
            streams.streamsMetadataForStore(topology2Store, TOPOLOGY_2),
            streams2.streamsMetadataForStore(topology2Store, TOPOLOGY_2));
        verifyMetadataForTopology(TOPOLOGY_1,
            streams.allStreamsClientsMetadataForTopology(TOPOLOGY_1),
            streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_1));
        verifyMetadataForTopology(TOPOLOGY_2,
            streams.allStreamsClientsMetadataForTopology(TOPOLOGY_2),
            streams2.allStreamsClientsMetadataForTopology(TOPOLOGY_2));
    } finally {
        CLUSTER.deleteTopics(SINGLE_PARTITION_INPUT_STREAM, SINGLE_PARTITION_OUTPUT_STREAM);
    }
}
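The verifyMetadataForTopology helper is defined elsewhere in NamedTopologyIntegrationTest and is not shown here. One plausible shape for it, sketched under the assumption that it simply checks both clients report a consistent, non-empty view of the hosts serving the given topology:

// Hypothetical sketch, not the actual test helper: both clients should agree on
// which hosts serve the topology, and every reported host should actually hold
// some active or standby partitions.
private void verifyMetadataForTopology(final String topologyName,
                                       final Collection<StreamsMetadata> metadataFromClient1,
                                       final Collection<StreamsMetadata> metadataFromClient2) {
    assertThat(metadataFromClient1.size(), equalTo(metadataFromClient2.size()));
    for (final StreamsMetadata metadata : metadataFromClient1) {
        // a host with neither active nor standby partitions should never appear
        assertThat(metadata.topicPartitions().isEmpty() && metadata.standbyTopicPartitions().isEmpty(), is(false));
    }
    // relies on StreamsMetadataImpl's value-based equals, exercised by the tests above
    assertThat(new HashSet<>(metadataFromClient1), equalTo(new HashSet<>(metadataFromClient2)));
}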