Use of org.apache.flink.streaming.api.environment.StreamExecutionEnvironment in project flink by apache.
The class LegacyCheckpointedStreamingProgram, method main:
public static void main(String[] args) throws Exception {
    final String jarFile = args[0];
    final String host = args[1];
    final int port = Integer.parseInt(args[2]);

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(host, port, jarFile);
    env.getConfig().disableSysoutLogging();
    env.enableCheckpointing(CHECKPOINT_INTERVALL);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 10000));
    env.disableOperatorChaining();

    DataStream<String> text = env.addSource(new SimpleStringGenerator());
    text.map(new StatefulMapper()).addSink(new NoOpSink());
    env.setParallelism(1);
    env.execute("Checkpointed Streaming Program");
}
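The helper classes referenced above are not shown on this page. A minimal sketch of what they might look like, assuming Flink 1.x's SourceFunction, MapFunction, SinkFunction, and the legacy Checkpointed interface; the bodies are illustrative stand-ins, not the actual Flink test code:

// Hypothetical stand-ins for the helpers used above.
public static class SimpleStringGenerator implements SourceFunction<String> {

    private volatile boolean running = true;

    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        long i = 0;
        while (running) {
            // emit under the checkpoint lock so emission and checkpoints do not interleave
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect("element-" + (i++));
            }
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}

public static class StatefulMapper implements MapFunction<String, String>, Checkpointed<Integer> {

    private int count;

    @Override
    public String map(String value) {
        count++;
        return value;
    }

    @Override
    public Integer snapshotState(long checkpointId, long checkpointTimestamp) {
        return count;
    }

    @Override
    public void restoreState(Integer state) {
        count = state;
    }
}

public static class NoOpSink implements SinkFunction<String> {

    @Override
    public void invoke(String value) {
        // discard all records
    }
}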
Use of org.apache.flink.streaming.api.environment.StreamExecutionEnvironment in project flink by apache.
The class StreamingCustomInputSplitProgram, method main:
public static void main(String[] args) throws Exception {
    final String jarFile = args[0];
    final String host = args[1];
    final int port = Integer.parseInt(args[2]);
    final int parallelism = Integer.parseInt(args[3]);

    Configuration config = new Configuration();
    config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "5 s");

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(host, port, config, jarFile);
    env.getConfig().disableSysoutLogging();
    env.setParallelism(parallelism);

    DataStream<Integer> data = env.createInput(new CustomInputFormat());

    data.map(new MapFunction<Integer, Tuple2<Integer, Double>>() {

        @Override
        public Tuple2<Integer, Double> map(Integer value) throws Exception {
            return new Tuple2<Integer, Double>(value, value * 0.5);
        }
    }).addSink(new NoOpSink());

    env.execute();
}
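CustomInputFormat is also not shown. A minimal sketch of a custom input format that emits a fixed range of integers per split, assuming the Flink 1.x org.apache.flink.api.common.io.InputFormat API with GenericInputSplit and DefaultInputSplitAssigner; the actual test class differs in its details:

// Hypothetical sketch of a custom InputFormat producing integers.
public static class CustomInputFormat implements InputFormat<Integer, GenericInputSplit> {

    private int next;
    private int end;

    @Override
    public void configure(Configuration parameters) {
    }

    @Override
    public BaseStatistics getStatistics(BaseStatistics cachedStatistics) {
        return null; // no statistics available
    }

    @Override
    public GenericInputSplit[] createInputSplits(int minNumSplits) {
        GenericInputSplit[] splits = new GenericInputSplit[minNumSplits];
        for (int i = 0; i < minNumSplits; i++) {
            splits[i] = new GenericInputSplit(i, minNumSplits);
        }
        return splits;
    }

    @Override
    public InputSplitAssigner getInputSplitAssigner(GenericInputSplit[] inputSplits) {
        return new DefaultInputSplitAssigner(inputSplits);
    }

    @Override
    public void open(GenericInputSplit split) {
        // each split emits ten integers, offset by its split number (illustrative)
        this.next = split.getSplitNumber() * 10;
        this.end = this.next + 10;
    }

    @Override
    public boolean reachedEnd() {
        return next >= end;
    }

    @Override
    public Integer nextRecord(Integer reuse) {
        return next++;
    }

    @Override
    public void close() {
    }
}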
Use of org.apache.flink.streaming.api.environment.StreamExecutionEnvironment in project flink by apache.
The class FastFailuresITCase, method testThis:
@Test
public void testThis() {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 2);

    LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(config, false);
    cluster.start();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
    env.getConfig().disableSysoutLogging();
    env.setParallelism(4);
    env.enableCheckpointing(1000);
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(210, 0));

    DataStream<Tuple2<Integer, Integer>> input = env.addSource(new RichSourceFunction<Tuple2<Integer, Integer>>() {

        @Override
        public void open(Configuration parameters) {
            // fail on open until NUM_FAILURES restarts have happened
            if (FAILURES_SO_FAR.incrementAndGet() <= NUM_FAILURES) {
                throw new RuntimeException("fail");
            }
        }

        @Override
        public void run(SourceContext<Tuple2<Integer, Integer>> ctx) {
            // emit nothing; the test only exercises fast restarts
        }

        @Override
        public void cancel() {
        }
    });

    input.keyBy(0).map(new MapFunction<Tuple2<Integer, Integer>, Integer>() {

        @Override
        public Integer map(Tuple2<Integer, Integer> value) {
            return value.f0;
        }
    }).addSink(new SinkFunction<Integer>() {

        @Override
        public void invoke(Integer value) {
        }
    });

    try {
        env.execute();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
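The anonymous source above reads two static fields defined elsewhere in FastFailuresITCase. Plausible declarations, with a NUM_FAILURES value chosen (as an assumption) to stay below the 210 restarts permitted by the restart strategy:

// Hypothetical declarations matching the usage in open() above; the actual values are assumptions.
private static final AtomicLong FAILURES_SO_FAR = new AtomicLong();
private static final int NUM_FAILURES = 200;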
Use of org.apache.flink.streaming.api.environment.StreamExecutionEnvironment in project flink by apache.
The class JobManagerHACheckpointRecoveryITCase, method testCheckpointedStreamingSumProgram:
/**
 * Simple checkpointed streaming sum.
 *
 * <p>The sources (running at Parallelism) each count up to sequenceEnd. The sink (parallelism 1)
 * sums up all counts and reports the result to the main thread via a static variable. We wait
 * until some checkpoints have completed and sanity-check that the sources recover with updated
 * state, to make sure that this test actually tests something.
 */
@Test
@RetryOnFailure(times = 1)
public void testCheckpointedStreamingSumProgram() throws Exception {
    // Config
    final int checkpointingInterval = 200;
    final int sequenceEnd = 5000;
    final long expectedSum = Parallelism * sequenceEnd * (sequenceEnd + 1) / 2;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(Parallelism);
    env.enableCheckpointing(checkpointingInterval);
    env.addSource(new CheckpointedSequenceSource(sequenceEnd)).addSink(new CountingSink()).setParallelism(1);

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(
        ZooKeeper.getConnectString(), FileStateBackendBasePath.getAbsoluteFile().toURI().toString());
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, Parallelism);

    ActorSystem testSystem = null;
    final JobManagerProcess[] jobManagerProcess = new JobManagerProcess[2];
    LeaderRetrievalService leaderRetrievalService = null;
    ActorSystem taskManagerSystem = null;

    try {
        final Deadline deadline = TestTimeOut.fromNow();

        // Test actor system
        testSystem = AkkaUtils.createActorSystem(new Configuration(), new Some<>(new Tuple2<String, Object>("localhost", 0)));

        // The job managers
        jobManagerProcess[0] = new JobManagerProcess(0, config);
        jobManagerProcess[1] = new JobManagerProcess(1, config);
        jobManagerProcess[0].startProcess();
        jobManagerProcess[1].startProcess();

        // Leader listener
        TestingListener leaderListener = new TestingListener();
        leaderRetrievalService = ZooKeeperUtils.createLeaderRetrievalService(config);
        leaderRetrievalService.start(leaderListener);

        // The task manager
        taskManagerSystem = AkkaUtils.createActorSystem(config, Option.apply(new Tuple2<String, Object>("localhost", 0)));
        TaskManager.startTaskManagerComponentsAndActor(
            config, ResourceID.generate(), taskManagerSystem, "localhost",
            Option.<String>empty(), Option.<LeaderRetrievalService>empty(), false, TaskManager.class);

        {
            // Initial submission
            leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
            String leaderAddress = leaderListener.getAddress();
            UUID leaderId = leaderListener.getLeaderSessionID();

            // Get the leader ref
            ActorRef leaderRef = AkkaUtils.getActorRef(leaderAddress, testSystem, deadline.timeLeft());
            ActorGateway leader = new AkkaActorGateway(leaderRef, leaderId);

            // Submit the job in detached mode
            leader.tell(new SubmitJob(jobGraph, ListeningBehaviour.DETACHED));
            JobManagerActorTestUtils.waitForJobStatus(jobGraph.getJobID(), JobStatus.RUNNING, leader, deadline.timeLeft());
        }

        // Who's the boss?
        JobManagerProcess leadingJobManagerProcess;
        if (jobManagerProcess[0].getJobManagerAkkaURL(deadline.timeLeft()).equals(leaderListener.getAddress())) {
            leadingJobManagerProcess = jobManagerProcess[0];
        } else {
            leadingJobManagerProcess = jobManagerProcess[1];
        }

        CompletedCheckpointsLatch.await();

        // Kill the leading job manager process
        leadingJobManagerProcess.destroy();

        {
            // Recovery by the standby JobManager
            leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
            String leaderAddress = leaderListener.getAddress();
            UUID leaderId = leaderListener.getLeaderSessionID();

            ActorRef leaderRef = AkkaUtils.getActorRef(leaderAddress, testSystem, deadline.timeLeft());
            ActorGateway leader = new AkkaActorGateway(leaderRef, leaderId);
            JobManagerActorTestUtils.waitForJobStatus(jobGraph.getJobID(), JobStatus.RUNNING, leader, deadline.timeLeft());
        }

        // Wait to finish
        FinalCountLatch.await();
        assertEquals(expectedSum, (long) FinalCount.get());

        for (int i = 0; i < Parallelism; i++) {
            assertNotEquals(0, RecoveredStates.get(i));
        }
    } catch (Throwable t) {
        // Reset all static state for test retries
        CompletedCheckpointsLatch = new CountDownLatch(2);
        RecoveredStates = new AtomicLongArray(Parallelism);
        FinalCountLatch = new CountDownLatch(1);
        FinalCount = new AtomicReference<>();
        LastElement = -1;

        // Print early (in some situations the process logs get too big
        // for Travis and the root problem is not shown)
        t.printStackTrace();

        // In case of an error, print the job manager process logs.
        if (jobManagerProcess[0] != null) {
            jobManagerProcess[0].printProcessLog();
        }
        if (jobManagerProcess[1] != null) {
            jobManagerProcess[1].printProcessLog();
        }
        throw t;
    } finally {
        if (jobManagerProcess[0] != null) {
            jobManagerProcess[0].destroy();
        }
        if (jobManagerProcess[1] != null) {
            jobManagerProcess[1].destroy();
        }
        if (leaderRetrievalService != null) {
            leaderRetrievalService.stop();
        }
        if (taskManagerSystem != null) {
            taskManagerSystem.shutdown();
        }
        if (testSystem != null) {
            testSystem.shutdown();
        }
    }
}
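CheckpointedSequenceSource and CountingSink are static inner classes of the test and are not reproduced here. A condensed, illustrative sketch of the source, assuming the legacy Checkpointed<Long> interface, shows how RecoveredStates (checked at the end of the test) could be populated on restore; the real class differs:

// Hypothetical, condensed sketch of the checkpointed counting source.
public static class CheckpointedSequenceSource extends RichParallelSourceFunction<Long> implements Checkpointed<Long> {

    private final long end;
    private long current;
    private volatile boolean running = true;

    public CheckpointedSequenceSource(long end) {
        this.end = end;
    }

    @Override
    public void run(SourceContext<Long> ctx) throws Exception {
        while (running && current <= end) {
            // hold the checkpoint lock so state and emission stay consistent
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect(current++);
            }
        }
    }

    @Override
    public Long snapshotState(long checkpointId, long checkpointTimestamp) {
        return current;
    }

    @Override
    public void restoreState(Long state) {
        // record that this subtask recovered with non-initial state
        RecoveredStates.set(getRuntimeContext().getIndexOfThisSubtask(), state);
        current = state;
    }

    @Override
    public void cancel() {
        running = false;
    }
}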
Use of org.apache.flink.streaming.api.environment.StreamExecutionEnvironment in project flink by apache.
The class TimestampITCase, method testErrorOnEventTimeOverProcessingTime:
@Test
public void testErrorOnEventTimeOverProcessingTime() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
    env.setParallelism(2);
    env.getConfig().disableSysoutLogging();
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    DataStream<Tuple2<String, Integer>> source1 = env.fromElements(new Tuple2<>("a", 1), new Tuple2<>("b", 2));

    // an event-time window under the ProcessingTime characteristic, with no
    // timestamps assigned, must fail at runtime
    source1.keyBy(0).window(TumblingEventTimeWindows.of(Time.seconds(5))).reduce(new ReduceFunction<Tuple2<String, Integer>>() {

        @Override
        public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) {
            return value1;
        }
    }).print();

    try {
        env.execute();
        fail("this should fail with an exception");
    } catch (Exception e) {
        // expected
    }
}
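For contrast, a minimal sketch of how the same window could be made legal: switch the environment to event time and assign timestamps before windowing. The AscendingTimestampExtractor usage and the choice of f1 as the timestamp field are illustrative assumptions, not part of the original test:

// Hypothetical fix: use event time and assign timestamps before the event-time window.
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
source1.assignTimestampsAndWatermarks(new AscendingTimestampExtractor<Tuple2<String, Integer>>() {

    @Override
    public long extractAscendingTimestamp(Tuple2<String, Integer> element) {
        return element.f1; // treat the integer field as the event timestamp (illustrative)
    }
}).keyBy(0).window(TumblingEventTimeWindows.of(Time.seconds(5))).reduce(new ReduceFunction<Tuple2<String, Integer>>() {

    @Override
    public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) {
        return value1;
    }
}).print();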