Use of org.apache.flink.connector.testframe.environment.TestEnvironmentSettings in project flink by apache.
The class UnorderedSourceTestSuiteBase, method testOneSplitWithMultipleConsumers.
@TestTemplate
@DisplayName("Test source with one split and four consumers")
public void testOneSplitWithMultipleConsumers(
        TestEnvironment testEnv, DataStreamSourceExternalContext<T> externalContext) throws Exception {
    TestingSourceSettings sourceSettings = TestingSourceSettings.builder()
            .setBoundedness(Boundedness.BOUNDED)
            .setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
            .build();
    TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder()
            .setConnectorJarPaths(externalContext.getConnectorJarPaths())
            .build();
    List<T> testData =
            externalContext.generateTestData(sourceSettings, 0, ThreadLocalRandom.current().nextLong());
    ExternalSystemSplitDataWriter<T> writer = externalContext.createSourceSplitDataWriter(sourceSettings);
    writer.writeRecords(testData);
    Source<T, ?, ?> source = externalContext.createSource(sourceSettings);
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    List<T> results =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Pulsar source")
                    .setParallelism(4)
                    .executeAndCollect("Source single split with four readers.", testData.size());
    assertThat(results, containsInAnyOrder(testData.toArray()));
}
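The assertion above uses Hamcrest's containsInAnyOrder because the single split is read by four parallel consumers, so the global output order is not deterministic even when no records are lost. Below is a minimal, self-contained sketch (plain Hamcrest and JDK collections, no Flink involved; the class name is made up for illustration) of how the order-insensitive matcher differs from an order-sensitive one.

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsInAnyOrder;

import java.util.Arrays;
import java.util.List;

// Hypothetical demo class, not part of the Flink test framework.
public class UnorderedAssertionSketch {
    public static void main(String[] args) {
        List<String> written = Arrays.asList("a", "b", "c", "d");
        // Parallel readers may interleave records, so the collected order can differ.
        List<String> collected = Arrays.asList("b", "a", "d", "c");

        // Passes: every written record is present exactly once, order ignored.
        assertThat(collected, containsInAnyOrder(written.toArray()));
        // An order-sensitive matcher such as Matchers.contains(written.toArray()) would fail here.
    }
}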
Use of org.apache.flink.connector.testframe.environment.TestEnvironmentSettings in project flink by apache.
The class SourceTestSuiteBase, method restartFromSavepoint.
private void restartFromSavepoint(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic,
        final int splitNumber,
        final int beforeParallelism,
        final int afterParallelism) throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings = TestingSourceSettings.builder()
            .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED)
            .setCheckpointingMode(semantic)
            .build();
    TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder()
            .setConnectorJarPaths(externalContext.getConnectorJarPaths())
            .build();
    // Step 2: Generate test data
    final List<ExternalSystemSplitDataWriter<T>> writers = new ArrayList<>();
    final List<List<T>> testRecordCollections = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        writers.add(externalContext.createSourceSplitDataWriter(sourceSettings));
        testRecordCollections.add(
                generateTestDataForWriter(externalContext, sourceSettings, i, writers.get(i)));
    }
    // Step 3: Build and execute Flink job
    final StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    execEnv.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
    execEnv.enableCheckpointing(50);
    execEnv.setRestartStrategy(RestartStrategies.noRestart());
    DataStreamSource<T> source =
            execEnv.fromSource(
                            tryCreateSource(externalContext, sourceSettings),
                            WatermarkStrategy.noWatermarks(),
                            "Tested Source")
                    .setParallelism(beforeParallelism);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(source);
    final JobClient jobClient = execEnv.executeAsync("Restart Test");
    // Step 4: Check the result and stop Flink job with a savepoint
    CollectResultIterator<T> iterator = null;
    try {
        iterator = iteratorBuilder.build(jobClient);
        checkResultWithSemantic(
                iterator, testRecordCollections, semantic, getTestDataSize(testRecordCollections));
    } catch (Exception e) {
        killJob(jobClient);
        throw e;
    }
    String savepointPath = jobClient
            .stopWithSavepoint(true, testEnv.getCheckpointUri(), SavepointFormatType.CANONICAL)
            .get(30, TimeUnit.SECONDS);
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.FINISHED),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
    // Step 5: Generate new test data
    final List<List<T>> newTestRecordCollections = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        newTestRecordCollections.add(
                generateTestDataForWriter(externalContext, sourceSettings, i, writers.get(i)));
    }
    // Step 6: Restart the Flink job with the savepoint
    TestEnvironmentSettings restartEnvOptions = TestEnvironmentSettings.builder()
            .setConnectorJarPaths(externalContext.getConnectorJarPaths())
            .setSavepointRestorePath(savepointPath)
            .build();
    final StreamExecutionEnvironment restartEnv = testEnv.createExecutionEnvironment(restartEnvOptions);
    restartEnv.enableCheckpointing(500);
    restartEnv.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
    DataStreamSource<T> restartSource =
            restartEnv.fromSource(
                            tryCreateSource(externalContext, sourceSettings),
                            WatermarkStrategy.noWatermarks(),
                            "Tested Source")
                    .setParallelism(afterParallelism);
    addCollectSink(restartSource);
    final JobClient restartJobClient = restartEnv.executeAsync("Restart Test");
    waitForJobStatus(
            restartJobClient,
            Collections.singletonList(JobStatus.RUNNING),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
    try {
        iterator.setJobClient(restartJobClient);
        /*
         * Use the same iterator as in the previous run, because the CollectStreamSink will
         * snapshot its state and recover from it.
         *
         * The fetcher inside the CollectResultIterator is responsible for communicating with
         * the CollectSinkFunction and handles the results through the
         * CheckpointedCollectResultBuffer under EXACTLY_ONCE semantics.
         */
        checkResultWithSemantic(
                iterator, newTestRecordCollections, semantic, getTestDataSize(newTestRecordCollections));
    } finally {
        // Clean up
        killJob(restartJobClient);
        iterator.close();
    }
}
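The helper generateTestDataForWriter(...) called in Steps 2 and 5 is not shown on this page. The following is a plausible sketch of what it does, inferred only from the calls visible above (the actual helper in SourceTestSuiteBase may differ in details): generate one batch of records for the given split index, write it to the external system through that split's writer, and return the batch so it can later be compared against the records the Flink job reads back.

// Hypothetical reconstruction for illustration; not copied from the Flink sources.
private List<T> generateTestDataForWriter(
        DataStreamSourceExternalContext<T> externalContext,
        TestingSourceSettings sourceSettings,
        int splitIndex,
        ExternalSystemSplitDataWriter<T> writer) {
    // Generate a random batch for this split and push it into the external system.
    List<T> records =
            externalContext.generateTestData(
                    sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
    writer.writeRecords(records);
    // Return the batch so the caller can verify the records consumed by the Flink job.
    return records;
}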
Use of org.apache.flink.connector.testframe.environment.TestEnvironmentSettings in project flink by apache.
The class SourceTestSuiteBase, method testSourceMetrics.
/**
 * Test connector source metrics.
 *
 * <p>This test first creates 4 splits in the external system, writes test data to all splits,
 * and consumes them back via a Flink job with parallelism 4. It then reads and compares the
 * metrics.
 *
 * <p>Currently tested metric: numRecordsIn
 */
@TestTemplate
@DisplayName("Test source metrics")
public void testSourceMetrics(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic) throws Exception {
    TestingSourceSettings sourceSettings = TestingSourceSettings.builder()
            .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED)
            .setCheckpointingMode(semantic)
            .build();
    TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder()
            .setConnectorJarPaths(externalContext.getConnectorJarPaths())
            .build();
    final int splitNumber = 4;
    final List<List<T>> testRecordCollections = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        testRecordCollections.add(generateAndWriteTestData(i, externalContext, sourceSettings));
    }
    // Make sure a different source name is used when the test is executed multiple times.
    String sourceName = "metricTestSource" + testRecordCollections.hashCode();
    final StreamExecutionEnvironment env = testEnv.createExecutionEnvironment(envOptions);
    final DataStreamSource<T> dataStreamSource =
            env.fromSource(
                            tryCreateSource(externalContext, sourceSettings),
                            WatermarkStrategy.noWatermarks(),
                            sourceName)
                    .setParallelism(splitNumber);
    dataStreamSource.addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync("Metrics Test");
    final MetricQuerier queryRestClient = new MetricQuerier(new Configuration());
    final ExecutorService executorService = Executors.newCachedThreadPool();
    try {
        waitForAllTaskRunning(
                () -> getJobDetails(
                        new RestClient(new Configuration(), executorService),
                        testEnv.getRestEndpoint(),
                        jobClient.getJobID()),
                Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
        waitUntilCondition(
                () -> {
                    // Test metrics
                    try {
                        return checkSourceMetrics(
                                queryRestClient,
                                testEnv,
                                jobClient.getJobID(),
                                sourceName,
                                getTestDataSize(testRecordCollections));
                    } catch (Exception e) {
                        // Skip this attempt if the check fails and retry until the deadline.
                        return false;
                    }
                },
                Deadline.fromNow(DEFAULT_COLLECT_DATA_TIMEOUT));
    } finally {
        // Clean up
        executorService.shutdown();
        killJob(jobClient);
    }
}
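checkSourceMetrics(...) itself is not shown on this page. Conceptually, following the Javadoc above, it compares the source's aggregated numRecordsIn metric (queried over REST through the MetricQuerier) with the total number of records written to all splits. A minimal sketch of that comparison, assuming the metric value has already been fetched; the helper name and signature below are invented for illustration and are not part of the framework.

import java.util.List;

// Hypothetical illustration of the core check; the real checkSourceMetrics(...) also
// performs the REST lookup of the metric via MetricQuerier.
static boolean numRecordsInMatchesTestData(double numRecordsIn, List<? extends List<?>> testRecordCollections) {
    // Equivalent of getTestDataSize(...): total records written across all splits.
    int expected = testRecordCollections.stream().mapToInt(List::size).sum();
    // The job is unbounded, so the metric should converge to exactly the written count.
    return Double.compare(numRecordsIn, expected) == 0;
}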
Use of org.apache.flink.connector.testframe.environment.TestEnvironmentSettings in project flink by apache.
The class SourceTestSuiteBase, method testTaskManagerFailure.
/**
 * Test connector source with TaskManager failover.
 *
 * <p>This test will create 1 split in the external system, write test record set A into the
 * split, restart the TaskManager to trigger a job failover, write test record set B into the
 * split, and finally terminate the Flink job.
 *
 * <p>To pass the test, the number and order of records consumed by Flink must be identical to A
 * before the failover and to B after the failover.
 *
 * <p>An unbounded source is required for this test, since the TaskManager failover is triggered
 * in the middle of the test.
 */
@TestTemplate
@DisplayName("Test TaskManager failure")
public void testTaskManagerFailure(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        ClusterControllable controller,
        CheckpointingMode semantic) throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings = TestingSourceSettings.builder()
            .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED)
            .setCheckpointingMode(semantic)
            .build();
    TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder()
            .setConnectorJarPaths(externalContext.getConnectorJarPaths())
            .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
    // Step 2: Write test data to external system
    int splitIndex = 0;
    List<T> testRecordsBeforeFailure =
            externalContext.generateTestData(
                    sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
    ExternalSystemSplitDataWriter<T> externalSystemSplitDataWriter =
            externalContext.createSourceSplitDataWriter(sourceSettings);
    LOG.info(
            "Writing {} records for split {} to external system",
            testRecordsBeforeFailure.size(),
            splitIndex);
    externalSystemSplitDataWriter.writeRecords(testRecordsBeforeFailure);
    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    execEnv.enableCheckpointing(50);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(1);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "TaskManager Failover Test");
    // Step 4: Validate records before killing TaskManagers
    CloseableIterator<T> iterator = iteratorBuilder.build(jobClient);
    LOG.info("Checking records before killing TaskManagers");
    checkResultWithSemantic(
            iterator, Arrays.asList(testRecordsBeforeFailure), semantic, testRecordsBeforeFailure.size());
    // Step 5: Trigger TaskManager failover
    LOG.info("Trigger TaskManager failover");
    controller.triggerTaskManagerFailover(jobClient, () -> {});
    LOG.info("Waiting for job recovering from failure");
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.RUNNING),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
    // Step 6: Write test data again to external system
    List<T> testRecordsAfterFailure =
            externalContext.generateTestData(
                    sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
    LOG.info(
            "Writing {} records for split {} to external system",
            testRecordsAfterFailure.size(),
            splitIndex);
    externalSystemSplitDataWriter.writeRecords(testRecordsAfterFailure);
    // Step 7: Validate test result
    LOG.info("Checking records after job failover");
    checkResultWithSemantic(
            iterator, Arrays.asList(testRecordsAfterFailure), semantic, testRecordsAfterFailure.size());
    // Step 8: Clean up
    terminateJob(jobClient);
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.CANCELED),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
    iterator.close();
}
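checkResultWithSemantic(...) interprets the consumed records differently depending on the semantic argument. The sketch below is only an illustration of that idea, not the framework's implementation: under EXACTLY_ONCE the records must match the expected batch exactly and in order, while under AT_LEAST_ONCE duplicates replayed after the failover are tolerated as long as every expected record eventually arrives.

import java.util.Iterator;
import java.util.List;

import org.apache.flink.streaming.api.CheckpointingMode;

// Illustrative sketch only; the real checkResultWithSemantic(...) belongs to the test framework.
static <T> boolean matchesSemantic(Iterator<T> results, List<T> expected, CheckpointingMode semantic) {
    int next = 0;
    while (next < expected.size() && results.hasNext()) {
        T record = results.next();
        if (record.equals(expected.get(next))) {
            // Matched the next expected record in order.
            next++;
        } else if (semantic == CheckpointingMode.EXACTLY_ONCE) {
            // Exactly-once tolerates neither duplicates nor reordering.
            return false;
        }
        // Under AT_LEAST_ONCE, records replayed after the failover are simply skipped.
    }
    return next == expected.size();
}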
Use of org.apache.flink.connector.testframe.environment.TestEnvironmentSettings in project flink by apache.
The class SourceTestSuiteBase, method testIdleReader.
/**
 * Test connector source with an idle reader.
 *
 * <p>This test will create 4 splits in the external system, write test data to all splits, and
 * consume them back via a Flink job with parallelism 5, so at least one parallel instance /
 * source reader will be idle (assigned no splits). If the split enumerator of the source does
 * not signal NoMoreSplitsEvent to the idle source reader, the Flink job will never transition to
 * the FINISHED state.
 *
 * <p>To pass this test, the number and order of records in each split consumed by Flink need to
 * be identical to the test data written into the external system. There is no requirement on
 * record order across splits.
 *
 * <p>A bounded source is required for this test.
 */
@TestTemplate
@DisplayName("Test source with at least one idle parallelism")
public void testIdleReader(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic) throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings = TestingSourceSettings.builder()
            .setBoundedness(Boundedness.BOUNDED)
            .setCheckpointingMode(semantic)
            .build();
    TestEnvironmentSettings envOptions = TestEnvironmentSettings.builder()
            .setConnectorJarPaths(externalContext.getConnectorJarPaths())
            .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
    // Step 2: Write test data to external system
    int splitNumber = 4;
    List<List<T>> testRecordsLists = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        testRecordsLists.add(generateAndWriteTestData(i, externalContext, sourceSettings));
    }
    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(splitNumber + 1);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "Idle Reader Test");
    // Step 4: Validate test data
    try (CloseableIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
        LOG.info("Checking test results");
        checkResultWithSemantic(resultIterator, testRecordsLists, semantic, null);
    }
    // Step 5: Clean up
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.FINISHED),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
}
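The FINISHED requirement above hinges on the split enumerator signalling "no more splits" to readers that are left without work. Below is a minimal, self-contained sketch of an enumerator that does this against the public Source API; the split and enumerator class names are invented for illustration, and a real connector's enumerator would carry connector-specific split state and checkpoint data.

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

import org.apache.flink.api.connector.source.SourceSplit;
import org.apache.flink.api.connector.source.SplitEnumerator;
import org.apache.flink.api.connector.source.SplitEnumeratorContext;

// Hypothetical split type; a real connector would carry partition/offset information here.
class ExampleSplit implements SourceSplit {
    private final String id;

    ExampleSplit(String id) {
        this.id = id;
    }

    @Override
    public String splitId() {
        return id;
    }
}

// Hypothetical enumerator showing the behavior testIdleReader relies on: a reader that asks for
// work when no splits are left must be told there are no more splits, otherwise it idles forever
// and the bounded job never reaches FINISHED.
class ExampleEnumerator implements SplitEnumerator<ExampleSplit, Void> {
    private final SplitEnumeratorContext<ExampleSplit> context;
    private final Queue<ExampleSplit> remainingSplits;

    ExampleEnumerator(SplitEnumeratorContext<ExampleSplit> context, List<ExampleSplit> splits) {
        this.context = context;
        this.remainingSplits = new ArrayDeque<>(splits);
    }

    @Override
    public void start() {}

    @Override
    public void handleSplitRequest(int subtaskId, String requesterHostname) {
        ExampleSplit split = remainingSplits.poll();
        if (split != null) {
            context.assignSplit(split, subtaskId);
        } else {
            // Without this signal the idle reader never finishes.
            context.signalNoMoreSplits(subtaskId);
        }
    }

    @Override
    public void addSplitsBack(List<ExampleSplit> splits, int subtaskId) {
        remainingSplits.addAll(splits);
    }

    @Override
    public void addReader(int subtaskId) {}

    @Override
    public Void snapshotState(long checkpointId) {
        return null;
    }

    @Override
    public void close() {}
}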