Use of org.apache.flink.api.common.functions.ReduceFunction in project flink by apache.
In class StreamingRuntimeContextTest, the method testReducingStateInstantiation:
@Test
public void testReducingStateInstantiation() throws Exception {
    final ExecutionConfig config = new ExecutionConfig();
    config.registerKryoType(Path.class);

    final AtomicReference<Object> descriptorCapture = new AtomicReference<>();

    StreamingRuntimeContext context = new StreamingRuntimeContext(
            createDescriptorCapturingMockOp(descriptorCapture, config),
            createMockEnvironment(),
            Collections.<String, Accumulator<?, ?>>emptyMap());

    @SuppressWarnings("unchecked")
    ReduceFunction<TaskInfo> reducer = (ReduceFunction<TaskInfo>) mock(ReduceFunction.class);

    ReducingStateDescriptor<TaskInfo> descr =
            new ReducingStateDescriptor<>("name", reducer, TaskInfo.class);

    context.getReducingState(descr);

    StateDescriptor<?, ?> descrIntercepted = (StateDescriptor<?, ?>) descriptorCapture.get();
    TypeSerializer<?> serializer = descrIntercepted.getSerializer();

    // check that the Path class is really registered, i.e., the execution config was applied
    assertTrue(serializer instanceof KryoSerializer);
    assertTrue(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId() > 0);
}
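The test above only checks that the descriptor is intercepted and the execution config applied; it mocks the reducer entirely. For reference, a minimal sketch of how a concrete ReduceFunction pairs with a ReducingStateDescriptor in ordinary use. The summing reducer and the "sum" state name are illustrative stand-ins, not part of the test:

    // Illustrative sketch: a concrete summing reducer instead of the Mockito mock.
    ReduceFunction<Long> sum = new ReduceFunction<Long>() {

        @Override
        public Long reduce(Long value1, Long value2) {
            return value1 + value2;
        }
    };

    // Same constructor as in the test: state name, reduce function, value type.
    ReducingStateDescriptor<Long> sumDescriptor =
            new ReducingStateDescriptor<>("sum", sum, Long.class);

    // Inside a rich function, the descriptor is handed to the runtime context,
    // just as the test does directly on its StreamingRuntimeContext:
    // ReducingState<Long> state = getRuntimeContext().getReducingState(sumDescriptor);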
Use of org.apache.flink.api.common.functions.ReduceFunction in project flink by apache.
In class SimpleRecoveryITCaseBase, the method testRestartMultipleTimes:
@Test
public void testRestartMultipleTimes() {
    try {
        List<Long> resultCollection = new ArrayList<Long>();

        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
                "localhost", cluster.getLeaderRPCPort());
        env.setParallelism(4);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 100));
        env.getConfig().disableSysoutLogging();

        env.generateSequence(1, 10)
                .rebalance()
                .map(new FailingMapper3<Long>())
                .reduce(new ReduceFunction<Long>() {

                    @Override
                    public Long reduce(Long value1, Long value2) {
                        return value1 + value2;
                    }
                })
                .output(new LocalCollectionOutputFormat<Long>(resultCollection));

        executeAndRunAssertions(env);

        long sum = 0;
        for (long l : resultCollection) {
            sum += l;
        }
        assertEquals(55, sum);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
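Since ReduceFunction declares a single abstract method, the anonymous class above can also be written as a lambda on Java 8+. A minimal, equivalent sketch (Flink's type extraction accepts this because the reducer's input and output types are identical):

    // Equivalent lambda form of the anonymous ReduceFunction above (Java 8+).
    env.generateSequence(1, 10)
            .rebalance()
            .map(new FailingMapper3<Long>())
            .reduce((value1, value2) -> value1 + value2)
            .output(new LocalCollectionOutputFormat<Long>(resultCollection));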
Use of org.apache.flink.api.common.functions.ReduceFunction in project flink by apache.
In class TaskManagerFailureRecoveryITCase, the method testRestartWithFailingTaskManager:
@Test
public void testRestartWithFailingTaskManager() {
    final int PARALLELISM = 4;

    LocalFlinkMiniCluster cluster = null;
    ActorSystem additionalSystem = null;

    try {
        Configuration config = new Configuration();
        config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
        config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, PARALLELISM);
        config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 16);
        config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "500 ms");
        config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "20 s");
        config.setInteger(ConfigConstants.AKKA_WATCH_THRESHOLD, 20);

        cluster = new LocalFlinkMiniCluster(config, false);
        cluster.start();

        // for the result
        List<Long> resultCollection = new ArrayList<Long>();

        final ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment(
                "localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARALLELISM);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
        env.getConfig().disableSysoutLogging();

        env.generateSequence(1, 10)
                .map(new FailingMapper<Long>())
                .reduce(new ReduceFunction<Long>() {

                    @Override
                    public Long reduce(Long value1, Long value2) {
                        return value1 + value2;
                    }
                })
                .output(new LocalCollectionOutputFormat<Long>(resultCollection));

        // simple reference (atomic does not matter) to pass back an exception from the trigger thread
        final AtomicReference<Throwable> ref = new AtomicReference<Throwable>();

        // trigger the execution from a separate thread, so we are available to tamper with the
        // cluster during the execution
        Thread trigger = new Thread("program trigger") {

            @Override
            public void run() {
                try {
                    env.execute();
                } catch (Throwable t) {
                    ref.set(t);
                }
            }
        };
        trigger.setDaemon(true);
        trigger.start();

        // wait until all the mappers are running and blocked
        for (int i = 0; i < PARALLELISM; i++) {
            FailingMapper.TASK_TO_COORD_QUEUE.take();
        }

        // bring up one more task manager and wait for it to appear
        {
            additionalSystem = cluster.startTaskManagerActorSystem(2);
            ActorRef additionalTaskManager = cluster.startTaskManager(2, additionalSystem);

            Object message = TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage();
            Future<Object> future = Patterns.ask(additionalTaskManager, message, 30000);
            try {
                Await.result(future, new FiniteDuration(30000, TimeUnit.MILLISECONDS));
            } catch (TimeoutException e) {
                fail("The additional TaskManager did not come up within 30 seconds");
            }
        }

        // kill the two other TaskManagers
        for (ActorRef tm : cluster.getTaskManagersAsJava()) {
            tm.tell(PoisonPill.getInstance(), null);
        }

        // wait for the next set of mappers (the recovery ones) to come online
        for (int i = 0; i < PARALLELISM; i++) {
            FailingMapper.TASK_TO_COORD_QUEUE.take();
        }

        // tell the mappers that they may continue this time
        for (int i = 0; i < PARALLELISM; i++) {
            FailingMapper.COORD_TO_TASK_QUEUE.add(new Object());
        }

        // wait for the program to finish
        trigger.join();

        if (ref.get() != null) {
            Throwable t = ref.get();
            t.printStackTrace();
            fail("Program execution caused an exception: " + t.getMessage());
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (additionalSystem != null) {
            additionalSystem.shutdown();
        }
        if (cluster != null) {
            cluster.stop();
        }
    }
}
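The FailingMapper class itself is not shown on this page. A plausible sketch of the handshake it performs, assuming static blocking queues as the field names suggest; the real class in the Flink test sources may differ in detail:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    import org.apache.flink.api.common.functions.RichMapFunction;
    import org.apache.flink.configuration.Configuration;

    // Hypothetical reconstruction for illustration, not the actual Flink test class.
    public class FailingMapper<T> extends RichMapFunction<T, T> {

        static final BlockingQueue<Object> TASK_TO_COORD_QUEUE = new LinkedBlockingQueue<>();
        static final BlockingQueue<Object> COORD_TO_TASK_QUEUE = new LinkedBlockingQueue<>();

        @Override
        public void open(Configuration parameters) throws Exception {
            // Each parallel task announces itself once per (re)start ...
            TASK_TO_COORD_QUEUE.add(new Object());
            // ... and then blocks until the coordinating test thread releases it.
            COORD_TO_TASK_QUEUE.take();
        }

        @Override
        public T map(T value) {
            return value;
        }
    }

This matches the test's coordination: PARALLELISM take() calls observe the first attempt's tasks, the TaskManagers are killed, another PARALLELISM take() calls observe the recovered tasks, and only then are the release objects enqueued.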