Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
Class StreamPartitionAssignorTest, method checkAssignment.
private AssignmentInfo checkAssignment(Set<String> expectedTopics, PartitionAssignor.Assignment assignment) {
    // This assumes 1) the DefaultPartitionGrouper is used, and 2) there is only one topic group.
    AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
    // check that the number of assigned partitions == the size of the active task id list
    assertEquals(assignment.partitions().size(), info.activeTasks.size());
    // check that the active tasks are consistent
    List<TaskId> activeTasks = new ArrayList<>();
    Set<String> activeTopics = new HashSet<>();
    for (TopicPartition partition : assignment.partitions()) {
        // with the default grouper, taskId.partition == partition.partition()
        activeTasks.add(new TaskId(0, partition.partition()));
        activeTopics.add(partition.topic());
    }
    assertEquals(activeTasks, info.activeTasks);
    // check that the active partitions cover all topics
    assertEquals(expectedTopics, activeTopics);
    // check that the standby tasks are consistent
    Set<String> standbyTopics = new HashSet<>();
    for (Map.Entry<TaskId, Set<TopicPartition>> entry : info.standbyTasks.entrySet()) {
        TaskId id = entry.getKey();
        Set<TopicPartition> partitions = entry.getValue();
        for (TopicPartition partition : partitions) {
            // with the default grouper, taskId.partition == partition.partition()
            assertEquals(id.partition, partition.partition());
            standbyTopics.add(partition.topic());
        }
    }
    if (!info.standbyTasks.isEmpty()) {
        // check that the standby partitions cover all topics
        assertEquals(expectedTopics, standbyTopics);
    }
    return info;
}
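The taskId-to-partition correspondence this helper relies on can be shown in isolation. A minimal sketch, assuming a single topic group with id 0 as the comment above states; the class name and the topic/partition values are made up for illustration:

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.TaskId;

public class DefaultGrouperSketch {
    public static void main(String[] args) {
        // With the default grouper and a single topic group (group id 0),
        // the task for a source partition reuses that partition's number.
        TopicPartition partition = new TopicPartition("topic1", 2);
        TaskId taskId = new TaskId(0, partition.partition());
        // Prints "0_2 <- topic1-2": task 2 of topic group 0 reads partition 2.
        System.out.println(taskId + " <- " + partition);
    }
}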
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
Class StreamPartitionAssignorTest, method shouldNotAddStandbyTaskPartitionsToPartitionsForHost.
@Test
public void shouldNotAddStandbyTaskPartitionsToPartitionsForHost() throws Exception {
    final Properties props = configProps();
    props.setProperty(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
    final StreamsConfig config = new StreamsConfig(props);
    final KStreamBuilder builder = new KStreamBuilder();
    final String applicationId = "appId";
    builder.setApplicationId(applicationId);
    builder.stream("topic1").groupByKey().count("count");
    final UUID uuid = UUID.randomUUID();
    final String client = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client, uuid, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer1", new PartitionAssignor.Subscription(Collections.singletonList("topic1"), new SubscriptionInfo(uuid, emptyTasks, emptyTasks, userEndPoint).encode()));
    subscriptions.put("consumer2", new PartitionAssignor.Subscription(Collections.singletonList("topic1"), new SubscriptionInfo(UUID.randomUUID(), emptyTasks, emptyTasks, "other:9090").encode()));
    final Set<TopicPartition> allPartitions = Utils.mkSet(t1p0, t1p1, t1p2);
    final Map<String, PartitionAssignor.Assignment> assign = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumer1Assignment = assign.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumer1Assignment.userData());
    final Set<TopicPartition> consumer1Partitions = assignmentInfo.partitionsByHost.get(new HostInfo("localhost", 2171));
    final Set<TopicPartition> consumer2Partitions = assignmentInfo.partitionsByHost.get(new HostInfo("other", 9090));
    final Set<TopicPartition> allAssignedPartitions = new HashSet<>(consumer1Partitions);
    allAssignedPartitions.addAll(consumer2Partitions);
    // neither host is assigned every partition, but together they cover all of them
    assertThat(consumer1Partitions, not(allPartitions));
    assertThat(consumer2Partitions, not(allPartitions));
    assertThat(allAssignedPartitions, equalTo(allPartitions));
}
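The assertions hinge on the shape of partitionsByHost: each instance's endpoint maps only to the partitions of its active tasks, never its standbys. A minimal sketch of that structure, with hypothetical hosts and partition assignments that are not part of the test fixture:

import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.HostInfo;

public class PartitionsByHostSketch {
    public static void main(String[] args) {
        // partitionsByHost maps each instance's user endpoint to the partitions
        // of its *active* tasks; standby replicas are deliberately left out,
        // which is exactly what the test above verifies.
        final Map<HostInfo, Set<TopicPartition>> partitionsByHost = new HashMap<>();
        final Set<TopicPartition> activeOnLocalhost = new HashSet<>();
        activeOnLocalhost.add(new TopicPartition("topic1", 0));
        activeOnLocalhost.add(new TopicPartition("topic1", 1));
        partitionsByHost.put(new HostInfo("localhost", 2171), activeOnLocalhost);
        partitionsByHost.put(new HostInfo("other", 9090),
                new HashSet<>(Collections.singleton(new TopicPartition("topic1", 2))));
        System.out.println(partitionsByHost);
    }
}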
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
Class StreamPartitionAssignorTest, method shouldSetClusterMetadataOnAssignment.
@Test
public void shouldSetClusterMetadataOnAssignment() throws Exception {
    final List<TopicPartition> topic = Collections.singletonList(new TopicPartition("topic", 0));
    final Map<HostInfo, Set<TopicPartition>> hostState = Collections.singletonMap(new HostInfo("localhost", 80), Collections.singleton(new TopicPartition("topic", 0)));
    final AssignmentInfo assignmentInfo = new AssignmentInfo(Collections.singletonList(new TaskId(0, 0)), Collections.<TaskId, Set<TopicPartition>>emptyMap(), hostState);
    partitionAssignor.onAssignment(new PartitionAssignor.Assignment(topic, assignmentInfo.encode()));
    final Cluster cluster = partitionAssignor.clusterMetadata();
    final List<PartitionInfo> partitionInfos = cluster.partitionsForTopic("topic");
    // assert the size before indexing into the list
    assertEquals(1, partitionInfos.size());
    final PartitionInfo partitionInfo = partitionInfos.get(0);
    assertEquals("topic", partitionInfo.topic());
    assertEquals(0, partitionInfo.partition());
}
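clusterMetadata() rebuilds a Cluster view from the decoded host state, exposing each entry as a PartitionInfo. For reference, a PartitionInfo can also be constructed directly; a minimal sketch where Node.noNode() and the empty replica arrays are placeholders, not what the assignor actually produces:

import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionInfoSketch {
    public static void main(String[] args) {
        // arguments: topic, partition, leader, replicas, in-sync replicas
        final PartitionInfo info = new PartitionInfo("topic", 0, Node.noNode(), new Node[0], new Node[0]);
        System.out.println(info.topic() + "-" + info.partition()); // prints "topic-0"
    }
}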
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
Class ProcessorStateManagerTest, method testFlushAndClose.
@Test
public void testFlushAndClose() throws IOException {
    checkpoint.write(Collections.<TopicPartition, Long>emptyMap());
    // set up ack'ed offsets
    final Map<TopicPartition, Long> ackedOffsets = new HashMap<>();
    ackedOffsets.put(new TopicPartition(persistentStoreTopicName, 1), 123L);
    ackedOffsets.put(new TopicPartition(nonPersistentStoreTopicName, 1), 456L);
    ackedOffsets.put(new TopicPartition(ProcessorStateManager.storeChangelogTopic(applicationId, "otherTopic"), 1), 789L);
    final Map<String, String> storeToChangelogTopic = new HashMap<>();
    storeToChangelogTopic.put(persistentStoreName, persistentStoreTopicName);
    storeToChangelogTopic.put(nonPersistentStoreName, nonPersistentStoreTopicName);
    final ProcessorStateManager stateMgr = new ProcessorStateManager(taskId, noPartitions, false, stateDirectory, storeToChangelogTopic, changelogReader);
    try {
        // make sure the checkpoint file isn't deleted
        assertTrue(checkpointFile.exists());
        stateMgr.register(persistentStore, true, persistentStore.stateRestoreCallback);
        stateMgr.register(nonPersistentStore, true, nonPersistentStore.stateRestoreCallback);
    } finally {
        // flush, then close the state manager with the ack'ed offsets
        stateMgr.flush(new MockProcessorContext(StateSerdes.withBuiltinTypes("foo", String.class, String.class), new NoOpRecordCollector()));
        stateMgr.close(ackedOffsets);
    }
    // make sure all stores are flushed and closed, and the checkpoint file is written
    assertTrue(persistentStore.flushed);
    assertTrue(persistentStore.closed);
    assertTrue(nonPersistentStore.flushed);
    assertTrue(nonPersistentStore.closed);
    assertTrue(checkpointFile.exists());
    // the checkpoint file should contain an offset for the persistent store only:
    // the acked offset 123 plus one, i.e. the next offset to restore from
    final Map<TopicPartition, Long> checkpointedOffsets = checkpoint.read();
    assertEquals(1, checkpointedOffsets.size());
    assertEquals(Long.valueOf(124), checkpointedOffsets.get(new TopicPartition(persistentStoreTopicName, 1)));
}
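The off-by-one in the final assertion (123 acked, 124 checkpointed) is the checkpoint convention: what gets persisted is the next offset to restore from, not the last one acked. A minimal round-trip sketch using OffsetCheckpoint; the file path and changelog topic name are hypothetical:

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class CheckpointRoundTripSketch {
    public static void main(String[] args) throws IOException {
        final File file = new File("/tmp/.checkpoint"); // hypothetical location
        final OffsetCheckpoint checkpoint = new OffsetCheckpoint(file);
        final TopicPartition tp = new TopicPartition("appId-store-changelog", 1); // hypothetical topic
        // last acked offset was 123, so 124 is the next offset to restore from
        checkpoint.write(Collections.singletonMap(tp, 124L));
        final Map<TopicPartition, Long> read = checkpoint.read();
        System.out.println(read.get(tp)); // prints 124
    }
}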
Use of org.apache.kafka.common.TopicPartition in project kafka by apache.
Class ProcessorStateManagerTest, method shouldNotWriteCheckpointForNonPersistent.
@Test
public void shouldNotWriteCheckpointForNonPersistent() throws Exception {
    final TopicPartition topicPartition = new TopicPartition(nonPersistentStoreTopicName, 1);
    final ProcessorStateManager stateMgr = new ProcessorStateManager(taskId, noPartitions, true, stateDirectory, Collections.singletonMap(nonPersistentStoreName, nonPersistentStoreTopicName), changelogReader);
    stateMgr.register(nonPersistentStore, true, nonPersistentStore.stateRestoreCallback);
    stateMgr.checkpoint(Collections.singletonMap(topicPartition, 876L));
    // the offset for the non-persistent store must not reach the checkpoint file
    final Map<TopicPartition, Long> read = checkpoint.read();
    assertThat(read, equalTo(Collections.<TopicPartition, Long>emptyMap()));
}
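This is the expected contract rather than an oversight: a checkpoint records how far local persistent state is valid so restoration can resume from that offset, while a non-persistent (in-memory) store loses its contents on close and is always rebuilt from the beginning of its changelog, so checkpointing an offset for it would be misleading.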