Use of org.apache.kafka.test.MockInternalTopicManager in project kafka by apache.
The class StreamPartitionAssignorTest, method shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks.
@Test
public void shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks() {
    final String applicationId = "application-id";
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    KStream<Object, Object> stream1 = builder.stream("topic1")
        .selectKey(new KeyValueMapper<Object, Object, Object>() {
            @Override
            public Object apply(Object key, Object value) {
                return null;
            }
        })
        .groupByKey()
        .count("count")
        .toStream()
        .map(new KeyValueMapper<Object, Long, KeyValue<Object, Object>>() {
            @Override
            public KeyValue<Object, Object> apply(Object key, Long value) {
                return null;
            }
        });
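    // Join a stream read from "unknownTopic" (for which no partition metadata exists) with stream1.
    // The assignor must neither loop forever waiting for that metadata nor create the join's repartition topics.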
builder.stream("unknownTopic").selectKey(new KeyValueMapper<Object, Object, Object>() {
@Override
public Object apply(Object key, Object value) {
return null;
}
}).join(stream1, new ValueJoiner() {
@Override
public Object apply(Object value1, Object value2) {
return null;
}
}, JoinWindows.of(0));
final UUID uuid = UUID.randomUUID();
final String client = "client1";
final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client, uuid, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client));
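    // MockInternalTopicManager records the internal topics the assignor prepares instead of talking to a real broker.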
    final MockInternalTopicManager mockInternalTopicManager =
        new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer);
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put(client, new PartitionAssignor.Subscription(Collections.singletonList("unknownTopic"),
        new SubscriptionInfo(uuid, emptyTasks, emptyTasks, userEndPoint).encode()));
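    // Only the repartition and changelog topics for the count() on topic1 should be created;
    // nothing should be created for the join against the topic with missing metadata.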
    final Map<String, PartitionAssignor.Assignment> assignment = partitionAssignor.assign(metadata, subscriptions);
    final Map<String, Integer> expectedCreatedInternalTopics = new HashMap<>();
    expectedCreatedInternalTopics.put(applicationId + "-count-repartition", 3);
    expectedCreatedInternalTopics.put(applicationId + "-count-changelog", 3);
    assertThat(mockInternalTopicManager.readyTopics, equalTo(expectedCreatedInternalTopics));
    final List<TopicPartition> expectedAssignment = Arrays.asList(
        new TopicPartition("topic1", 0),
        new TopicPartition("topic1", 1),
        new TopicPartition("topic1", 2),
        new TopicPartition(applicationId + "-count-repartition", 0),
        new TopicPartition(applicationId + "-count-repartition", 1),
        new TopicPartition(applicationId + "-count-repartition", 2));
    assertThat(new HashSet<>(assignment.get(client).partitions()), equalTo(new HashSet<>(expectedAssignment)));
}
Use of org.apache.kafka.test.MockInternalTopicManager in project kafka by apache.
The class StreamPartitionAssignorTest, method testAssignWithStates.
@Test
public void testAssignWithStates() throws Exception {
    String applicationId = "test";
    builder.setApplicationId(applicationId);
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor-1", new MockProcessorSupplier(), "source1");
    builder.addStateStore(new MockStateStoreSupplier("store1", false), "processor-1");
    builder.addProcessor("processor-2", new MockProcessorSupplier(), "source2");
    builder.addStateStore(new MockStateStoreSupplier("store2", false), "processor-2");
    builder.addStateStore(new MockStateStoreSupplier("store3", false), "processor-2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    TaskId task00 = new TaskId(0, 0);
    TaskId task01 = new TaskId(0, 1);
    TaskId task02 = new TaskId(0, 2);
    TaskId task10 = new TaskId(1, 0);
    TaskId task11 = new TaskId(1, 1);
    TaskId task12 = new TaskId(1, 2);
    List<TaskId> tasks = Utils.mkList(task00, task01, task02, task10, task11, task12);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, applicationId, client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid1, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid2, Collections.<TaskId>emptySet(), Collections.<TaskId>emptySet(), userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partition size: since there is no previous task and there are two sub-topologies,
    // the assignment is random, so we cannot check for an exact match
    assertEquals(2, assignments.get("consumer10").partitions().size());
    assertEquals(2, assignments.get("consumer11").partitions().size());
    assertEquals(2, assignments.get("consumer20").partitions().size());
    AssignmentInfo info10 = AssignmentInfo.decode(assignments.get("consumer10").userData());
    AssignmentInfo info11 = AssignmentInfo.decode(assignments.get("consumer11").userData());
    AssignmentInfo info20 = AssignmentInfo.decode(assignments.get("consumer20").userData());
    assertEquals(2, info10.activeTasks.size());
    assertEquals(2, info11.activeTasks.size());
    assertEquals(2, info20.activeTasks.size());
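    // Together the three consumers must cover all six tasks, with no task assigned twice.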
    Set<TaskId> allTasks = new HashSet<>();
    allTasks.addAll(info10.activeTasks);
    allTasks.addAll(info11.activeTasks);
    allTasks.addAll(info20.activeTasks);
    assertEquals(new HashSet<>(tasks), allTasks);
    // check tasks for state topics
    Map<Integer, TopologyBuilder.TopicsInfo> topicGroups = thread10.builder.topicGroups();
    assertEquals(Utils.mkSet(task00, task01, task02), tasksForState(applicationId, "store1", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store2", tasks, topicGroups));
    assertEquals(Utils.mkSet(task10, task11, task12), tasksForState(applicationId, "store3", tasks, topicGroups));
}
Use of org.apache.kafka.test.MockInternalTopicManager in project kafka by apache.
The class StreamPartitionAssignorTest, method shouldThrowExceptionIfApplicationServerConfigIsNotHostPortPair.
@Test
public void shouldThrowExceptionIfApplicationServerConfigIsNotHostPortPair() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
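    // application.server must be a host:port pair; a bare host name like "localhost" is invalid.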
    final StreamsConfig config = new StreamsConfig(properties);
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));
    try {
        partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
        Assert.fail("expected an exception due to invalid config");
    } catch (ConfigException e) {
        // pass
    }
}
Use of org.apache.kafka.test.MockInternalTopicManager in project kafka by apache.
The class StreamPartitionAssignorTest, method testAssignBasic.
@Test
public void testAssignBasic() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks10 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks11 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks20 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks10 = Utils.mkSet(task1);
    final Set<TaskId> standbyTasks11 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks20 = Utils.mkSet(task0);
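    // Each consumer reports its previous active task and a standby task in its subscription,
    // so the assignor can favour sticky placement.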
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid1, prevTasks10, standbyTasks10, userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid1, prevTasks11, standbyTasks11, userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid2, prevTasks20, standbyTasks20, userEndPoint).encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partitions
    assertEquals(Utils.mkSet(Utils.mkSet(t1p0, t2p0), Utils.mkSet(t1p1, t2p1)),
        Utils.mkSet(new HashSet<>(assignments.get("consumer10").partitions()), new HashSet<>(assignments.get("consumer11").partitions())));
    assertEquals(Utils.mkSet(t1p2, t2p2), new HashSet<>(assignments.get("consumer20").partitions()));
    // check assignment info
    Set<TaskId> allActiveTasks = new HashSet<>();
    // the first consumer
    AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    // the second consumer
    AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks);
    assertEquals(Utils.mkSet(task0, task1), allActiveTasks);
    // the third consumer
    AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks);
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}
Use of org.apache.kafka.test.MockInternalTopicManager in project kafka by apache.
The class StreamPartitionAssignorTest, method shouldMapUserEndPointToTopicPartitions.
@Test
public void shouldMapUserEndPointToTopicPartitions() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost:8080";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    builder.addSource("source", "topic1");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", "processor");
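    // A simple source -> processor -> sink topology over the three partitions of topic1.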
    final List<String> topics = Utils.mkList("topic1");
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    final StreamPartitionAssignor partitionAssignor = new StreamPartitionAssignor();
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer1", new PartitionAssignor.Subscription(topics,
        new SubscriptionInfo(uuid1, emptyTasks, emptyTasks, myEndPoint).encode()));
    final Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumerAssignment = assignments.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumerAssignment.userData());
    final Set<TopicPartition> topicPartitions = assignmentInfo.partitionsByHost.get(new HostInfo("localhost", 8080));
    assertEquals(Utils.mkSet(
        new TopicPartition("topic1", 0),
        new TopicPartition("topic1", 1),
        new TopicPartition("topic1", 2)), topicPartitions);
}