Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class SourceTestSuiteBase, method testSourceMetrics.
/**
* Test connector source metrics.
*
* <p>This test will create 4 splits in the external system first, write test data to all splits
* and consume back via a Flink job with parallelism 4. Then read and compare the metrics.
*
 * <p>Metric currently covered by this test: numRecordsIn
*/
@TestTemplate
@DisplayName("Test source metrics")
public void testSourceMetrics(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic)
        throws Exception {
    TestingSourceSettings sourceSettings =
            TestingSourceSettings.builder()
                    .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED)
                    .setCheckpointingMode(semantic)
                    .build();
    TestEnvironmentSettings envOptions =
            TestEnvironmentSettings.builder()
                    .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                    .build();
    final int splitNumber = 4;
    final List<List<T>> testRecordCollections = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        testRecordCollections.add(generateAndWriteTestData(i, externalContext, sourceSettings));
    }
    // Make sure a different source name is used each time the test is executed
    String sourceName = "metricTestSource" + testRecordCollections.hashCode();
    final StreamExecutionEnvironment env = testEnv.createExecutionEnvironment(envOptions);
    final DataStreamSource<T> dataStreamSource =
            env.fromSource(
                            tryCreateSource(externalContext, sourceSettings),
                            WatermarkStrategy.noWatermarks(),
                            sourceName)
                    .setParallelism(splitNumber);
    dataStreamSource.addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync("Metrics Test");
    final MetricQuerier queryRestClient = new MetricQuerier(new Configuration());
    final ExecutorService executorService = Executors.newCachedThreadPool();
    try {
        waitForAllTaskRunning(
                () ->
                        getJobDetails(
                                new RestClient(new Configuration(), executorService),
                                testEnv.getRestEndpoint(),
                                jobClient.getJobID()),
                Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
        waitUntilCondition(
                () -> {
                    // Check the metrics; ignore a failed check and retry until the deadline
                    try {
                        return checkSourceMetrics(
                                queryRestClient,
                                testEnv,
                                jobClient.getJobID(),
                                sourceName,
                                getTestDataSize(testRecordCollections));
                    } catch (Exception e) {
                        return false;
                    }
                },
                Deadline.fromNow(DEFAULT_COLLECT_DATA_TIMEOUT));
    } finally {
        // Clean up
        executorService.shutdown();
        killJob(jobClient);
    }
}
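The checkSourceMetrics helper polled in the loop above is not shown on this page. Below is a minimal sketch of what such a check could look like, assuming Flink's MetricQuerier#getAggregatedMetricsByRestAPI and the MetricNames.IO_NUM_RECORDS_IN constant; the exact signature is reconstructed here, not taken from this snippet.

// Sketch only: compare the aggregated numRecordsIn metric of the source
// operator with the number of records written to the external system.
// The getAggregatedMetricsByRestAPI signature is an assumption.
private boolean checkSourceMetrics(
        MetricQuerier queryRestClient,
        TestEnvironment testEnv,
        JobID jobId,
        String sourceName,
        long allRecordSize)
        throws Exception {
    Double sumNumRecordsIn =
            queryRestClient.getAggregatedMetricsByRestAPI(
                    testEnv.getRestEndpoint(),
                    jobId,
                    sourceName,
                    MetricNames.IO_NUM_RECORDS_IN,
                    null);
    // The metric may not be registered yet right after the tasks start; in
    // that case the caller's waitUntilCondition loop simply retries.
    return sumNumRecordsIn != null && sumNumRecordsIn.longValue() == allRecordSize;
}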
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class SourceTestSuiteBase, method testTaskManagerFailure.
/**
* Test connector source with task manager failover.
*
 * <p>This test will create 1 split in the external system, write test record set A into the
 * split, restart a TaskManager to trigger a job failover, write test record set B into the
 * split, and finally terminate the Flink job.
*
* <p>The number and order of records consumed by Flink should be identical to A before the
* failover and B after the failover in order to pass the test.
*
* <p>An unbounded source is required for this test, since TaskManager failover will be
* triggered in the middle of the test.
*/
@TestTemplate
@DisplayName("Test TaskManager failure")
public void testTaskManagerFailure(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        ClusterControllable controller,
        CheckpointingMode semantic)
        throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings =
            TestingSourceSettings.builder()
                    .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED)
                    .setCheckpointingMode(semantic)
                    .build();
    TestEnvironmentSettings envOptions =
            TestEnvironmentSettings.builder()
                    .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                    .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
    // Step 2: Write test data to external system
    int splitIndex = 0;
    List<T> testRecordsBeforeFailure =
            externalContext.generateTestData(
                    sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
    ExternalSystemSplitDataWriter<T> externalSystemSplitDataWriter =
            externalContext.createSourceSplitDataWriter(sourceSettings);
    LOG.info(
            "Writing {} records for split {} to external system",
            testRecordsBeforeFailure.size(),
            splitIndex);
    externalSystemSplitDataWriter.writeRecords(testRecordsBeforeFailure);
    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    execEnv.enableCheckpointing(50);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(1);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "TaskManager Failover Test");
    // Step 4: Validate records before killing TaskManagers
    CloseableIterator<T> iterator = iteratorBuilder.build(jobClient);
    LOG.info("Checking records before killing TaskManagers");
    checkResultWithSemantic(
            iterator,
            Arrays.asList(testRecordsBeforeFailure),
            semantic,
            testRecordsBeforeFailure.size());
    // Step 5: Trigger TaskManager failover
    LOG.info("Triggering TaskManager failover");
    controller.triggerTaskManagerFailover(jobClient, () -> {});
    LOG.info("Waiting for job to recover from the failure");
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.RUNNING),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
    // Step 6: Write test data again to external system
    List<T> testRecordsAfterFailure =
            externalContext.generateTestData(
                    sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
    LOG.info(
            "Writing {} records for split {} to external system",
            testRecordsAfterFailure.size(),
            splitIndex);
    externalSystemSplitDataWriter.writeRecords(testRecordsAfterFailure);
    // Step 7: Validate test result
    LOG.info("Checking records after job failover");
    checkResultWithSemantic(
            iterator,
            Arrays.asList(testRecordsAfterFailure),
            semantic,
            testRecordsAfterFailure.size());
    // Step 8: Clean up
    terminateJob(jobClient);
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.CANCELED),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
    iterator.close();
}
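checkResultWithSemantic is another suite helper not shown on this page; in Flink it delegates to collect-iterator assertions. Purely to illustrate the semantics it enforces, here is a conceptual sketch (not the suite's implementation; assertEquals/assertTrue are JUnit assertions):

// Conceptual sketch of the check: under EXACTLY_ONCE every expected record
// must arrive exactly once and in order; under AT_LEAST_ONCE records may be
// replayed after the failover, so duplicates are tolerated but all expected
// records must still appear in order.
private void checkResultSketch(
        CloseableIterator<T> iterator, List<T> expected, CheckpointingMode semantic) {
    if (semantic == CheckpointingMode.EXACTLY_ONCE) {
        for (T expectedRecord : expected) {
            assertTrue(iterator.hasNext());
            assertEquals(expectedRecord, iterator.next());
        }
    } else {
        // Advance through the stream, matching expected records in order and
        // skipping anything that does not match (e.g. a replayed duplicate).
        int matched = 0;
        while (matched < expected.size() && iterator.hasNext()) {
            if (expected.get(matched).equals(iterator.next())) {
                matched++;
            }
        }
        assertEquals(expected.size(), matched);
    }
}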
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class SourceTestSuiteBase, method testIdleReader.
/**
* Test connector source with an idle reader.
*
 * <p>This test will create 4 splits in the external system, write test data to all splits, and
 * consume them back via a Flink job with parallelism 5, so that at least one source reader is
 * idle (assigned no splits). If the split enumerator of the source doesn't signal
 * NoMoreSplitsEvent to the idle source reader, the Flink job will never reach the FINISHED state.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*
* <p>A bounded source is required for this test.
*/
@TestTemplate
@DisplayName("Test source with at least one idle parallelism")
public void testIdleReader(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic)
        throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings =
            TestingSourceSettings.builder()
                    .setBoundedness(Boundedness.BOUNDED)
                    .setCheckpointingMode(semantic)
                    .build();
    TestEnvironmentSettings envOptions =
            TestEnvironmentSettings.builder()
                    .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                    .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
    // Step 2: Write test data to external system
    int splitNumber = 4;
    List<List<T>> testRecordsLists = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        testRecordsLists.add(generateAndWriteTestData(i, externalContext, sourceSettings));
    }
    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(splitNumber + 1);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "Idle Reader Test");
    // Step 4: Validate test data
    try (CloseableIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
        LOG.info("Checking test results");
        checkResultWithSemantic(resultIterator, testRecordsLists, semantic, null);
    }
    // Step 5: Clean up
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.FINISHED),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
}
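generateAndWriteTestData, called in the loop of Step 2, mirrors the inline write logic of the failover test above. A minimal sketch consistent with its call sites (the LOG statement is illustrative):

// Sketch: generate random records for one split and push them to the
// external system through the context's split writer, returning the records
// so the test can compare them against what Flink consumes.
protected List<T> generateAndWriteTestData(
        int splitIndex,
        DataStreamSourceExternalContext<T> externalContext,
        TestingSourceSettings sourceSettings) {
    List<T> testRecords =
            externalContext.generateTestData(
                    sourceSettings, splitIndex, ThreadLocalRandom.current().nextLong());
    LOG.info("Writing {} records for split {} to external system", testRecords.size(), splitIndex);
    externalContext.createSourceSplitDataWriter(sourceSettings).writeRecords(testRecords);
    return testRecords;
}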
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class SourceTestSuiteBase, method testMultipleSplits.
/**
 * Test connector source with multiple splits in the external system.
*
* <p>This test will create 4 splits in the external system, write test data to all splits, and
* consume back via a Flink job with 4 parallelism.
*
* <p>The number and order of records in each split consumed by Flink need to be identical to
* the test data written into the external system to pass this test. There's no requirement for
* record order across splits.
*
* <p>A bounded source is required for this test.
*/
@TestTemplate
@DisplayName("Test source with multiple splits")
public void testMultipleSplits(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic)
        throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings =
            TestingSourceSettings.builder()
                    .setBoundedness(Boundedness.BOUNDED)
                    .setCheckpointingMode(semantic)
                    .build();
    TestEnvironmentSettings envOptions =
            TestEnvironmentSettings.builder()
                    .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                    .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
    // Step 2: Write test data to external system
    int splitNumber = 4;
    List<List<T>> testRecordsLists = new ArrayList<>();
    for (int i = 0; i < splitNumber; i++) {
        testRecordsLists.add(generateAndWriteTestData(i, externalContext, sourceSettings));
    }
    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envOptions);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(splitNumber);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "Source Multiple Split Test");
    // Step 4: Validate test data
    try (CloseableIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
        LOG.info("Checking test results");
        checkResultWithSemantic(resultIterator, testRecordsLists, semantic, null);
    }
}
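addCollectSink and CollectIteratorBuilder are internal suite utilities. A test that does not need them can get the same collect-back behavior from the public DataStream#executeAndCollect API; a sketch reusing the names from the method above (note that executeAndCollect submits the job itself, so submitJob is not called):

// Alternative sketch using the public executeAndCollect API instead of the
// suite's addCollectSink/CollectIteratorBuilder pair.
try (CloseableIterator<T> resultIterator =
        execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                .setParallelism(splitNumber)
                .executeAndCollect("Source Multiple Split Test")) {
    while (resultIterator.hasNext()) {
        T record = resultIterator.next();
        // ... compare against testRecordsLists, as checkResultWithSemantic does ...
    }
}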
Use of org.apache.flink.core.execution.JobClient in project flink by apache.
Class SourceTestSuiteBase, method testSourceSingleSplit.
// ----------------------------- Basic test cases ---------------------------------
/**
* Test connector source with only one split in the external system.
*
* <p>This test will create one split in the external system, write test data into it, and
* consume back via a Flink job with 1 parallelism.
*
* <p>The number and order of records consumed by Flink need to be identical to the test data
* written to the external system in order to pass this test.
*
* <p>A bounded source is required for this test.
*/
@TestTemplate
@DisplayName("Test source with single split")
public void testSourceSingleSplit(
        TestEnvironment testEnv,
        DataStreamSourceExternalContext<T> externalContext,
        CheckpointingMode semantic)
        throws Exception {
    // Step 1: Preparation
    TestingSourceSettings sourceSettings =
            TestingSourceSettings.builder()
                    .setBoundedness(Boundedness.BOUNDED)
                    .setCheckpointingMode(semantic)
                    .build();
    TestEnvironmentSettings envSettings =
            TestEnvironmentSettings.builder()
                    .setConnectorJarPaths(externalContext.getConnectorJarPaths())
                    .build();
    Source<T, ?, ?> source = tryCreateSource(externalContext, sourceSettings);
    // Step 2: Write test data to external system
    List<T> testRecords = generateAndWriteTestData(0, externalContext, sourceSettings);
    // Step 3: Build and execute Flink job
    StreamExecutionEnvironment execEnv = testEnv.createExecutionEnvironment(envSettings);
    DataStreamSource<T> stream =
            execEnv.fromSource(source, WatermarkStrategy.noWatermarks(), "Tested Source")
                    .setParallelism(1);
    CollectIteratorBuilder<T> iteratorBuilder = addCollectSink(stream);
    JobClient jobClient = submitJob(execEnv, "Source Single Split Test");
    // Step 4: Validate test data
    try (CollectResultIterator<T> resultIterator = iteratorBuilder.build(jobClient)) {
        LOG.info("Checking test results");
        checkResultWithSemantic(resultIterator, Arrays.asList(testRecords), semantic, null);
    }
    // Step 5: Clean up
    waitForJobStatus(
            jobClient,
            Collections.singletonList(JobStatus.FINISHED),
            Deadline.fromNow(DEFAULT_JOB_STATUS_CHANGE_TIMEOUT));
}
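Every test above obtains its source through tryCreateSource. A minimal sketch of that helper, assuming it aborts (rather than fails) the test via JUnit 5's TestAbortedException when the context cannot satisfy the requested settings:

// Sketch: build the source from the external context; if the context cannot
// create a source for the requested settings, skip the test instead of
// failing it.
private Source<T, ?, ?> tryCreateSource(
        DataStreamSourceExternalContext<T> externalContext,
        TestingSourceSettings sourceSettings) {
    try {
        return externalContext.createSource(sourceSettings);
    } catch (UnsupportedOperationException e) {
        throw new TestAbortedException("Cannot create a source satisfying the given settings", e);
    }
}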