Use of org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit in project flink by apache.
The class KafkaSourceTestEnv, method getSplitsByOwners:
// ------------------- topic information helpers -------------------
public static Map<Integer, Map<String, KafkaPartitionSplit>> getSplitsByOwners(
        final Collection<String> topics, final int numSubtasks) {
    final Map<Integer, Map<String, KafkaPartitionSplit>> splitsByOwners = new HashMap<>();
    for (String topic : topics) {
        getPartitionsForTopic(topic)
                .forEach(
                        tp -> {
                            int ownerReader = Math.abs(tp.hashCode()) % numSubtasks;
                            KafkaPartitionSplit split =
                                    new KafkaPartitionSplit(
                                            tp,
                                            getEarliestOffset(tp),
                                            (long) NUM_RECORDS_PER_PARTITION);
                            splitsByOwners
                                    .computeIfAbsent(ownerReader, r -> new HashMap<>())
                                    .put(KafkaPartitionSplit.toSplitId(tp), split);
                        });
    }
    return splitsByOwners;
}
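For reference, a minimal standalone sketch of constructing and inspecting a split directly, using the same three-argument constructor (partition, starting offset, stopping offset) and toSplitId helper as above. The topic name and offsets are hypothetical:

import org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit;
import org.apache.kafka.common.TopicPartition;

public class KafkaPartitionSplitSketch {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("demo-topic", 0); // hypothetical topic
        // A bounded split covering offsets [0, 100) of demo-topic-0.
        KafkaPartitionSplit split = new KafkaPartitionSplit(tp, 0L, 100L);
        // The split id is derived from the TopicPartition, as in the helper above.
        System.out.println(KafkaPartitionSplit.toSplitId(tp));
        System.out.println(split.getTopicPartition());
    }
}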
Use of org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit in project flink by apache.
The class KafkaEnumeratorTest, method testKafkaClientProperties:
@Test
public void testKafkaClientProperties() throws Exception {
    Properties properties = new Properties();
    String clientIdPrefix = "test-prefix";
    Integer defaultTimeoutMs = 99999;
    properties.setProperty(KafkaSourceOptions.CLIENT_ID_PREFIX.key(), clientIdPrefix);
    properties.setProperty(
            ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(defaultTimeoutMs));
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context =
                    new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator =
                    createEnumerator(
                            context,
                            ENABLE_PERIODIC_PARTITION_DISCOVERY,
                            PRE_EXISTING_TOPICS,
                            Collections.emptySet(),
                            properties)) {
        enumerator.start();
        AdminClient adminClient =
                (AdminClient) Whitebox.getInternalState(enumerator, "adminClient");
        assertNotNull(adminClient);
        String clientId = (String) Whitebox.getInternalState(adminClient, "clientId");
        assertNotNull(clientId);
        assertTrue(clientId.startsWith(clientIdPrefix));
        assertEquals(
                defaultTimeoutMs,
                Whitebox.getInternalState(adminClient, "defaultApiTimeoutMs"));
    }
}
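Outside of tests, the same two client properties can be passed when building a KafkaSource. A minimal sketch, assuming the standard KafkaSourceBuilder API; the bootstrap servers and topic name are placeholders:

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.KafkaSourceOptions;
import org.apache.kafka.clients.consumer.ConsumerConfig;

KafkaSource<String> source =
        KafkaSource.<String>builder()
                .setBootstrapServers("localhost:9092") // placeholder broker address
                .setTopics("demo-topic") // placeholder topic
                .setValueOnlyDeserializer(new SimpleStringSchema())
                // The same two client properties exercised by the test above:
                .setProperty(KafkaSourceOptions.CLIENT_ID_PREFIX.key(), "test-prefix")
                .setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "99999")
                .build();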
Use of org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit in project flink by apache.
The class KafkaEnumeratorTest, method verifyAssignments:
private void verifyAssignments(
        Map<Integer, Set<TopicPartition>> expectedAssignments,
        Map<Integer, List<KafkaPartitionSplit>> actualAssignments) {
    actualAssignments.forEach(
            (reader, splits) -> {
                Set<TopicPartition> expectedAssignmentsForReader =
                        expectedAssignments.get(reader);
                assertNotNull(expectedAssignmentsForReader);
                assertEquals(expectedAssignmentsForReader.size(), splits.size());
                for (KafkaPartitionSplit split : splits) {
                    assertTrue(
                            expectedAssignmentsForReader.contains(
                                    split.getTopicPartition()));
                }
            });
}
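A hypothetical call site for this helper, building the expected map by hand; READER0, TOPIC1, and the context variable are assumed to come from the surrounding test class:

Map<Integer, Set<TopicPartition>> expected = new HashMap<>();
expected.put(
        READER0,
        new HashSet<>(
                Arrays.asList(
                        new TopicPartition(TOPIC1, 0), new TopicPartition(TOPIC1, 1))));
verifyAssignments(expected, context.getSplitsAssignmentSequence().get(0).assignment());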
Use of org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit in project flink by apache.
The class KafkaEnumeratorTest, method testWorkWithPreexistingAssignments:
@Test
public void testWorkWithPreexistingAssignments() throws Throwable {
    Set<TopicPartition> preexistingAssignments;
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context1 =
                    new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator =
                    createEnumerator(context1, ENABLE_PERIODIC_PARTITION_DISCOVERY)) {
        startEnumeratorAndRegisterReaders(context1, enumerator);
        preexistingAssignments =
                asEnumState(context1.getSplitsAssignmentSequence().get(0).assignment());
    }
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context2 =
                    new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator =
                    createEnumerator(
                            context2,
                            ENABLE_PERIODIC_PARTITION_DISCOVERY,
                            PRE_EXISTING_TOPICS,
                            preexistingAssignments,
                            new Properties())) {
        enumerator.start();
        runPeriodicPartitionDiscovery(context2);
        registerReader(context2, enumerator, READER0);
        // READER0's partitions were in the pre-existing assignments, so no new
        // assignment should be issued for it.
        assertTrue(context2.getSplitsAssignmentSequence().isEmpty());
        registerReader(context2, enumerator, READER1);
        verifyLastReadersAssignments(
                context2, Collections.singleton(READER1), PRE_EXISTING_TOPICS, 1);
    }
}
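The asEnumState helper is not shown in this snippet. A plausible shape for it, flattening a per-reader split assignment into the set of already-assigned partitions, might look like this (an assumption, not the actual test code):

// Requires java.util.stream.Collectors.
private static Set<TopicPartition> asEnumState(
        Map<Integer, List<KafkaPartitionSplit>> assignment) {
    return assignment.values().stream()
            .flatMap(List::stream)
            .map(KafkaPartitionSplit::getTopicPartition)
            .collect(Collectors.toSet());
}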
Use of org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit in project flink by apache.
The class KafkaEnumeratorTest, method testPartitionChangeChecking:
@Test
public void testPartitionChangeChecking() throws Throwable {
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context =
                    new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator =
                    createEnumerator(context, DISABLE_PERIODIC_PARTITION_DISCOVERY)) {
        enumerator.start();
        runOneTimePartitionDiscovery(context);
        registerReader(context, enumerator, READER0);
        verifyLastReadersAssignments(
                context, Collections.singleton(READER0), PRE_EXISTING_TOPICS, 1);

        // All partitions of TOPIC1 and TOPIC2 have been discovered at this point.
        // Check partition change using only DYNAMIC_TOPIC_NAME-0.
        TopicPartition newPartition = new TopicPartition(DYNAMIC_TOPIC_NAME, 0);
        Set<TopicPartition> fetchedPartitions = new HashSet<>();
        fetchedPartitions.add(newPartition);
        final KafkaSourceEnumerator.PartitionChange partitionChange =
                enumerator.getPartitionChange(fetchedPartitions);

        // Since the enumerator has never seen DYNAMIC_TOPIC_NAME-0, it should be
        // marked as a new partition.
        Set<TopicPartition> expectedNewPartitions = Collections.singleton(newPartition);

        // None of the existing partitions appear in fetchedPartitions, so they
        // should all be marked as removed.
        Set<TopicPartition> expectedRemovedPartitions = new HashSet<>();
        for (int i = 0; i < KafkaSourceTestEnv.NUM_PARTITIONS; i++) {
            expectedRemovedPartitions.add(new TopicPartition(TOPIC1, i));
            expectedRemovedPartitions.add(new TopicPartition(TOPIC2, i));
        }

        assertEquals(expectedNewPartitions, partitionChange.getNewPartitions());
        assertEquals(expectedRemovedPartitions, partitionChange.getRemovedPartitions());
    }
}
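The partition-change computation asserted here reduces to two set differences. A sketch with hypothetical variable names, where assignedPartitions stands for the partitions the enumerator has already seen:

Set<TopicPartition> newPartitions = new HashSet<>(fetchedPartitions);
newPartitions.removeAll(assignedPartitions); // fetched but never seen -> new
Set<TopicPartition> removedPartitions = new HashSet<>(assignedPartitions);
removedPartitions.removeAll(fetchedPartitions); // previously seen but gone -> removed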