Use of org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorContext in project flink by apache.
The class KafkaEnumeratorTest, method testKafkaClientProperties.
@Test
public void testKafkaClientProperties() throws Exception {
    Properties properties = new Properties();
    String clientIdPrefix = "test-prefix";
    Integer defaultTimeoutMs = 99999;
    properties.setProperty(KafkaSourceOptions.CLIENT_ID_PREFIX.key(), clientIdPrefix);
    properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(defaultTimeoutMs));
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator = createEnumerator(context, ENABLE_PERIODIC_PARTITION_DISCOVERY, PRE_EXISTING_TOPICS, Collections.emptySet(), properties)) {
        enumerator.start();
        AdminClient adminClient = (AdminClient) Whitebox.getInternalState(enumerator, "adminClient");
        assertNotNull(adminClient);
        String clientId = (String) Whitebox.getInternalState(adminClient, "clientId");
        assertNotNull(clientId);
        assertTrue(clientId.startsWith(clientIdPrefix));
        assertEquals(defaultTimeoutMs, Whitebox.getInternalState(adminClient, "defaultApiTimeoutMs"));
    }
}
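The Whitebox.getInternalState calls above read private fields ("adminClient", "clientId", "defaultApiTimeoutMs") through reflection; Whitebox comes from PowerMock. A minimal sketch of an equivalent helper using only java.lang.reflect, assuming nothing beyond the field names used in the test above:

import java.lang.reflect.Field;

final class ReflectionTestUtil {

    // Walk up the class hierarchy to find the named field, then read its value.
    @SuppressWarnings("unchecked")
    static <T> T getInternalState(Object target, String fieldName) throws ReflectiveOperationException {
        for (Class<?> clazz = target.getClass(); clazz != null; clazz = clazz.getSuperclass()) {
            try {
                Field field = clazz.getDeclaredField(fieldName);
                field.setAccessible(true);
                return (T) field.get(target);
            } catch (NoSuchFieldException ignored) {
                // Not declared here; keep looking in the superclass.
            }
        }
        throw new NoSuchFieldException(fieldName);
    }
}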
Use of org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorContext in project flink by apache.
The class KafkaEnumeratorTest, method testWorkWithPreexistingAssignments.
@Test
public void testWorkWithPreexistingAssignments() throws Throwable {
    Set<TopicPartition> preexistingAssignments;
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context1 = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator = createEnumerator(context1, ENABLE_PERIODIC_PARTITION_DISCOVERY)) {
        startEnumeratorAndRegisterReaders(context1, enumerator);
        preexistingAssignments = asEnumState(context1.getSplitsAssignmentSequence().get(0).assignment());
    }
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context2 = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator = createEnumerator(context2, ENABLE_PERIODIC_PARTITION_DISCOVERY, PRE_EXISTING_TOPICS, preexistingAssignments, new Properties())) {
        enumerator.start();
        runPeriodicPartitionDiscovery(context2);
        registerReader(context2, enumerator, READER0);
        assertTrue(context2.getSplitsAssignmentSequence().isEmpty());
        registerReader(context2, enumerator, READER1);
        verifyLastReadersAssignments(context2, Collections.singleton(READER1), PRE_EXISTING_TOPICS, 1);
    }
}
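asEnumState converts the first recorded splits assignment into the state handed to the restarted enumerator. A hypothetical sketch of such a helper, assuming SplitsAssignment.assignment() yields a map from reader id to assigned splits (the actual helper in KafkaEnumeratorTest may differ):

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit;
import org.apache.kafka.common.TopicPartition;

static Set<TopicPartition> asEnumState(Map<Integer, List<KafkaPartitionSplit>> assignment) {
    // Flatten the per-reader split lists into the set of already-assigned partitions.
    return assignment.values().stream()
            .flatMap(List::stream)
            .map(KafkaPartitionSplit::getTopicPartition)
            .collect(Collectors.toSet());
}

With that state restored, the second enumerator treats the pre-existing partitions as already assigned, which is why registering READER0 produces no new assignments.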
Use of org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorContext in project flink by apache.
The class KafkaEnumeratorTest, method testPartitionChangeChecking.
@Test
public void testPartitionChangeChecking() throws Throwable {
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator = createEnumerator(context, DISABLE_PERIODIC_PARTITION_DISCOVERY)) {
        enumerator.start();
        runOneTimePartitionDiscovery(context);
        registerReader(context, enumerator, READER0);
        verifyLastReadersAssignments(context, Collections.singleton(READER0), PRE_EXISTING_TOPICS, 1);
        // All partitions of TOPIC1 and TOPIC2 should have been discovered now.
        // Check partition change using only DYNAMIC_TOPIC_NAME-0.
        TopicPartition newPartition = new TopicPartition(DYNAMIC_TOPIC_NAME, 0);
        Set<TopicPartition> fetchedPartitions = new HashSet<>();
        fetchedPartitions.add(newPartition);
        final KafkaSourceEnumerator.PartitionChange partitionChange = enumerator.getPartitionChange(fetchedPartitions);
        // Since the enumerator has never seen DYNAMIC_TOPIC_NAME-0, it should be marked as a new partition.
        Set<TopicPartition> expectedNewPartitions = Collections.singleton(newPartition);
        // None of the existing topics' partitions are in fetchedPartitions, so they should all be marked as removed.
        Set<TopicPartition> expectedRemovedPartitions = new HashSet<>();
        for (int i = 0; i < KafkaSourceTestEnv.NUM_PARTITIONS; i++) {
            expectedRemovedPartitions.add(new TopicPartition(TOPIC1, i));
            expectedRemovedPartitions.add(new TopicPartition(TOPIC2, i));
        }
        assertEquals(expectedNewPartitions, partitionChange.getNewPartitions());
        assertEquals(expectedRemovedPartitions, partitionChange.getRemovedPartitions());
    }
}
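The PartitionChange the test verifies is a two-way set difference between the freshly fetched partitions and the partitions the enumerator already tracks. A minimal sketch of that diff, with illustrative names rather than the enumerator's actual fields:

import java.util.HashSet;
import java.util.Set;

import org.apache.kafka.common.TopicPartition;

// Fetched but not yet tracked -> new partitions.
static Set<TopicPartition> newPartitions(Set<TopicPartition> fetched, Set<TopicPartition> tracked) {
    Set<TopicPartition> result = new HashSet<>(fetched);
    result.removeAll(tracked);
    return result;
}

// Tracked but no longer fetched -> removed partitions.
static Set<TopicPartition> removedPartitions(Set<TopicPartition> fetched, Set<TopicPartition> tracked) {
    Set<TopicPartition> result = new HashSet<>(tracked);
    result.removeAll(fetched);
    return result;
}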
Use of org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorContext in project flink by apache.
The class SourceCoordinatorTest, method testErrorThrownFromSplitEnumerator.
@Test
public void testErrorThrownFromSplitEnumerator() throws Exception {
    final Error error = new Error("Test Error");
    try (final MockSplitEnumeratorContext<MockSourceSplit> enumeratorContext = new MockSplitEnumeratorContext<>(1);
            final SplitEnumerator<MockSourceSplit, Set<MockSourceSplit>> splitEnumerator = new MockSplitEnumerator(1, enumeratorContext) {
                @Override
                public void handleSourceEvent(int subtaskId, SourceEvent sourceEvent) {
                    throw error;
                }
            };
            final SourceCoordinator<?, ?> coordinator = new SourceCoordinator<>(OPERATOR_NAME, new EnumeratorCreatingSource<>(() -> splitEnumerator), context, new CoordinatorStoreImpl(), WatermarkAlignmentParams.WATERMARK_ALIGNMENT_DISABLED)) {
        coordinator.start();
        coordinator.handleEventFromOperator(1, new SourceEventWrapper(new SourceEvent() {
        }));
        waitUtil(() -> operatorCoordinatorContext.isJobFailed(), Duration.ofSeconds(10), "The job should have failed due to the artificial exception.");
        assertEquals(error, operatorCoordinatorContext.getJobFailureReason());
    }
}
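waitUtil is a polling helper from Flink's test utilities. A hedged sketch of what such a helper could look like; the real signature and backoff strategy may differ:

import java.time.Duration;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

static void waitUtil(Supplier<Boolean> condition, Duration timeout, String errorMessage)
        throws InterruptedException, TimeoutException {
    long deadlineNanos = System.nanoTime() + timeout.toNanos();
    while (!condition.get()) {
        if (System.nanoTime() >= deadlineNanos) {
            throw new TimeoutException(errorMessage); // the condition never held in time
        }
        Thread.sleep(10); // brief pause between polls
    }
}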
Use of org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorContext in project flink by apache.
The class KafkaEnumeratorTest, method testDiscoverPartitionsPeriodically.
@Test(timeout = 30000L)
public void testDiscoverPartitionsPeriodically() throws Throwable {
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator = createEnumerator(context, ENABLE_PERIODIC_PARTITION_DISCOVERY, INCLUDE_DYNAMIC_TOPIC);
            AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
        startEnumeratorAndRegisterReaders(context, enumerator);
        // Invoke the partition discovery callable again; there should be no new assignments.
        runPeriodicPartitionDiscovery(context);
        assertEquals("No assignments should be made because there is no partition change", 2, context.getSplitsAssignmentSequence().size());
        // Create the dynamic topic.
        adminClient.createTopics(Collections.singleton(new NewTopic(DYNAMIC_TOPIC_NAME, NUM_PARTITIONS_DYNAMIC_TOPIC, (short) 1))).all().get();
        // Invoke the partition discovery callable until the new topic's partitions are assigned.
        while (true) {
            runPeriodicPartitionDiscovery(context);
            if (context.getSplitsAssignmentSequence().size() < 3) {
                Thread.sleep(10);
            } else {
                break;
            }
        }
        verifyLastReadersAssignments(context, Arrays.asList(READER0, READER1), Collections.singleton(DYNAMIC_TOPIC_NAME), 3);
    } finally {
        try (AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
            adminClient.deleteTopics(Collections.singleton(DYNAMIC_TOPIC_NAME)).all().get();
        } catch (Exception e) {
            // Let it go.
        }
    }
}
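The discovery loop above runs deterministically because MockSplitEnumeratorContext captures the callables the enumerator schedules via callAsync instead of running them on a timer. A hypothetical sketch of the runPeriodicPartitionDiscovery helper, assuming the mock context exposes its captured periodic callables by index (the actual helper in KafkaEnumeratorTest may differ):

private static void runPeriodicPartitionDiscovery(MockSplitEnumeratorContext<KafkaPartitionSplit> context) throws Throwable {
    // Execute the first registered periodic callable once: it fetches partition
    // metadata and passes the result to the enumerator's discovery handler.
    context.runPeriodicCallable(0);
}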