Use of org.apache.flink.connector.testframe.environment.TestEnvironmentSettings in project flink by apache.
From the class SourceTestSuiteBase, method testMultipleSplits:
/**
 * Test connector source with multiple splits in the external system.
 *
 * <p>This test will create 4 splits in the external system, write test data to all splits, and
 * consume them back via a Flink job with a parallelism of 4.
 *
 * <p>The number and order of records in each split consumed by Flink must be identical to the
 * test data written into the external system for the test to pass. There is no ordering
 * requirement across splits.
 *
 * <p>A bounded source is required for this test.
 */
@TestTemplate
@DisplayName("Test source with multiple splits")
public void testMultipleSplits(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic)
        throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings =
            TestingSourceSettings.builder()
                    .setBoundedness(Boundedness.BOUNDED)
                    .setCheckpointingMode(semantic)
                    .build();
    TestEnvironmentSettings envOptions =
            TestEnvironmentSettings.builder()
                    .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                    .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);

    // Step 2: Write test data to external system
    int splitNumber = 4;
    List<List<T>> testRecordsLists = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        testRecordsLists.add(generateAndWriteTestData(i, externalContext, sourceSettings));
    }

    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(splitNumber);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "Source Multiple Split Test");

    // Step 4: Validate test data
    try (CloseableIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
        // Check test result
        LOG.info("Checking test results");
        checkResultWithSemantic(resultIterator, testRecordsLists, semantic, null);
    }
}
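These @TestTemplate methods are not run directly: a connector's test module subclasses SourceTestSuiteBase, and the testing framework injects the TestEnvironment, the external context, and the checkpointing semantics. Below is a minimal sketch of such a subclass, assuming the annotations from org.apache.flink.connector.testframe.junit.annotations; the class name and the context factory are hypothetical stand-ins for a real connector's implementations.

import org.apache.flink.connector.testframe.environment.MiniClusterTestEnvironment;
import org.apache.flink.connector.testframe.junit.annotations.TestContext;
import org.apache.flink.connector.testframe.junit.annotations.TestEnv;
import org.apache.flink.connector.testframe.junit.annotations.TestSemantics;
import org.apache.flink.streaming.api.CheckpointingMode;

public class MySourceE2ECase extends SourceTestSuiteBase<String> {

    // Flink environment the test templates run against (an embedded MiniCluster).
    @TestEnv
    MiniClusterTestEnvironment flink = new MiniClusterTestEnvironment();

    // Checkpointing semantics each template is parameterized with; each value
    // becomes one invocation of every @TestTemplate method.
    @TestSemantics
    CheckpointingMode[] semantics = new CheckpointingMode[] {CheckpointingMode.EXACTLY_ONCE};

    // Hypothetical factory creating DataStreamSourceExternalContext<String>
    // instances; a real connector supplies its own implementation.
    @TestContext
    MySourceContextFactory contextFactory = new MySourceContextFactory();
}

A suite that also manages its external system in-process can declare an additional field annotated with @TestExternalSystem.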
Use of org.apache.flink.connector.testframe.environment.TestEnvironmentSettings in project flink by apache.
From the class SourceTestSuiteBase, method testSourceSingleSplit:
// ----------------------------- Basic test cases ---------------------------------
/**
 * Test connector source with only one split in the external system.
 *
 * <p>This test will create one split in the external system, write test data into it, and
 * consume it back via a Flink job with a parallelism of 1.
 *
 * <p>The number and order of records consumed by Flink must be identical to the test data
 * written to the external system for the test to pass.
 *
 * <p>A bounded source is required for this test.
 */
@TestTemplate
@DisplayName("Test source with single split")
public void testSourceSingleSplit(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic)
        throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings =
            TestingSourceSettings.builder()
                    .setBoundedness(Boundedness.BOUNDED)
                    .setCheckpointingMode(semantic)
                    .build();
    TestEnvironmentSettings envSettings =
            TestEnvironmentSettings.builder()
                    .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                    .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);

    // Step 2: Write test data to external system
    List<T> testRecords = generateAndWriteTestData(0, externalContext, sourceSettings);

    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envSettings);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(1);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "Source Single Split Test");

    // Step 4: Validate test data
    try (CollectResultIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
        // Check test result
        LOG.info("Checking test results");
        checkResultWithSemantic(resultIterator, Arrays.asList(testRecords), semantic, null);
    }

    // Step 5: Clean up
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.FINISHED),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
}
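The ordering contract stated in both Javadocs, order preserved within each split but none across splits, can be made concrete with a small checker. The sketch below is illustrative only: the helper name is hypothetical, it is not how checkResultWithSemantic is necessarily implemented, and it assumes test records are distinct across splits (with duplicates, a greedy first-match could reject a valid interleaving).

import java.util.List;

/** Hypothetical helper: true iff {@code consumed} is a valid interleaving of the splits. */
static <T> boolean matchesSplitOrdering(List<T> consumed, List<List<T>> splits) {
    // One cursor per split, pointing at the next record still expected from it.
    int[] cursors = new int[splits.size()];
    for (T record : consumed) {
        boolean matched = false;
        for (int i = 0; i < splits.size(); i++) {
            List<T> split = splits.get(i);
            if (cursors[i] < split.size() && split.get(cursors[i]).equals(record)) {
                cursors[i]++; // record arrived in order for split i
                matched = true;
                break;
            }
        }
        if (!matched) {
            return false; // unknown record, or out of order within its split
        }
    }
    // Every written record must have been consumed exactly once.
    for (int i = 0; i < splits.size(); i++) {
        if (cursors[i] != splits.get(i).size()) {
            return false;
        }
    }
    return true;
}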