Use of org.apache.flink.test.util.MiniClusterWithClientResource in project flink by apache.
The setup method of class RescalingITCase.
@Before
public void setup() throws Exception {
    // Detect a parameter change.
    if (currentBackend != backend) {
        shutDownExistingCluster();

        currentBackend = backend;

        Configuration config = new Configuration();

        final File checkpointDir = temporaryFolder.newFolder();
        final File savepointDir = temporaryFolder.newFolder();

        config.setString(StateBackendOptions.STATE_BACKEND, currentBackend);
        config.setString(
                CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir.toURI().toString());
        config.setString(
                CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir.toURI().toString());
        config.setInteger(
                NettyShuffleEnvironmentOptions.NETWORK_BUFFERS_PER_CHANNEL, buffersPerChannel);

        cluster =
                new MiniClusterWithClientResource(
                        new MiniClusterResourceConfiguration.Builder()
                                .setConfiguration(config)
                                .setNumberTaskManagers(numTaskManagers)
                                .setNumberSlotsPerTaskManager(numSlots)
                                .build());

        cluster.before();
    }
}
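Because setup() drives the resource lifecycle by hand (cluster.before()), the class also needs a matching teardown. A minimal sketch, assuming cluster is a static field and reusing the shutDownExistingCluster helper referenced above; the @AfterClass hook name is an assumption:

import org.junit.AfterClass;

// Sketch: tears down the MiniCluster created in setup(). Invoked from setup()
// when the parameterized backend changes, and once more after all tests ran.
private static void shutDownExistingCluster() {
    if (cluster != null) {
        cluster.after(); // counterpart of the manual cluster.before() in setup()
        cluster = null;
    }
}

@AfterClass
public static void tearDownCluster() {
    shutDownExistingCluster();
}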
Use of org.apache.flink.test.util.MiniClusterWithClientResource in project flink by apache.
The setup method of class StreamFaultToleranceTestBase.
@Before
public void setup() throws Exception {
    Configuration configuration = new Configuration();

    switch (failoverStrategy) {
        case RestartPipelinedRegionFailoverStrategy:
            configuration.setString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, "region");
            break;
        case RestartAllFailoverStrategy:
            configuration.setString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, "full");
            break;
    }

    // Configure the DFS DSTL for this test, as it might otherwise produce too much GC
    // pressure if the ChangelogStateBackend is used.
    // This is done unconditionally at the cluster level because randomization currently
    // happens at the job level (environment), while this factory can only be set at the
    // cluster level.
    FsStateChangelogStorageFactory.configure(configuration, tempFolder.newFolder());

    cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setConfiguration(configuration)
                            .setNumberTaskManagers(NUM_TASK_MANAGERS)
                            .setNumberSlotsPerTaskManager(NUM_TASK_SLOTS)
                            .build());

    cluster.before();
}
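The failoverStrategy field consulted by the switch above is presumably injected by JUnit's Parameterized runner. A hedged sketch of what the parameter supplier could look like, using the two enum constants named in the switch; the enum type name and supplier method name are assumptions:

import java.util.Arrays;
import java.util.Collection;
import org.junit.runners.Parameterized;

// Sketch: runs the whole test base once per failover strategy.
@Parameterized.Parameters(name = "failoverStrategy = {0}")
public static Collection<FailoverStrategy> parameters() {
    return Arrays.asList(
            FailoverStrategy.RestartPipelinedRegionFailoverStrategy,
            FailoverStrategy.RestartAllFailoverStrategy);
}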
Use of org.apache.flink.test.util.MiniClusterWithClientResource in project flink by apache.
The before method of class IncrementalStateReuseAfterFailureITCase.
@Before
public void before() throws Exception {
    Configuration configuration = new Configuration();
    FsStateChangelogStorageFactory.configure(configuration, temporaryFolder.newFolder());

    miniClusterResource =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setConfiguration(configuration)
                            .setNumberTaskManagers(1)
                            .setNumberSlotsPerTaskManager(1)
                            .build());

    miniClusterResource.before();
}
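Since before() starts the resource manually, a symmetric teardown is needed so the MiniCluster is released after each test. A minimal sketch; the method name is an assumption:

import org.junit.After;

// Sketch: releases the MiniCluster started manually in before().
@After
public void after() {
    if (miniClusterResource != null) {
        miniClusterResource.after();
    }
}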
Use of org.apache.flink.test.util.MiniClusterWithClientResource in project flink by apache.
The testStopWithSavepointWithDrainCallsFinishBeforeSnapshotState method of class SavepointITCase.
@Test
public void testStopWithSavepointWithDrainCallsFinishBeforeSnapshotState() throws Exception {
    int sinkParallelism = 5;
    MiniClusterWithClientResource cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberSlotsPerTaskManager(sinkParallelism + 1)
                            .build());

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    env.addSource(new InfiniteTestSource())
            .setParallelism(1)
            .name("Infinite Source")
            .addSink(new FinishingSink<>())
            .setParallelism(sinkParallelism);

    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    cluster.before();
    try {
        ClusterClient<?> client = cluster.getClusterClient();
        client.submitJob(jobGraph).get();
        waitUntilAllTasksAreRunning(cluster.getRestClusterClient(), jobGraph.getJobID());
        client.stopWithSavepoint(
                        jobGraph.getJobID(),
                        true,
                        savepointDir.getAbsolutePath(),
                        SavepointFormatType.CANONICAL)
                .get();
        // There should be no exceptions, and finish() should have been called on the
        // FinishingSink.
    } finally {
        cluster.after();
    }
}
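The manual before()/try/finally/after() pattern above lets this one test choose its own cluster size. When every test in a class can share one configuration, the same resource is commonly registered as a JUnit rule instead, so the framework drives the lifecycle. A hedged sketch; the field name is illustrative and the slot count mirrors sinkParallelism + 1 from the test above:

import org.junit.ClassRule;

// Sketch: JUnit calls before()/after() automatically around the test class,
// replacing the manual try/finally shown above.
@ClassRule
public static final MiniClusterWithClientResource CLUSTER =
        new MiniClusterWithClientResource(
                new MiniClusterResourceConfiguration.Builder()
                        .setNumberSlotsPerTaskManager(6)
                        .build());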