Example usage of org.apache.flink.api.java.ExecutionEnvironment in the Apache Flink project,
taken from class CustomSerializationITCase, method testIncorrectSerializer4.
@Test
public void testIncorrectSerializer4() {
    try {
        // Run against the remote test cluster so serialization actually crosses the wire.
        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARLLELISM);
        env.getConfig().disableSysoutLogging();

        // A value type whose serializer consumes fewer bytes than it wrote (spanning records):
        // the runtime is expected to detect this and abort the job.
        env.generateSequence(1, 10 * PARLLELISM)
            .map(new MapFunction<Long, ConsumesTooLittleSpanning>() {
                @Override
                public ConsumesTooLittleSpanning map(Long value) throws Exception {
                    return new ConsumesTooLittleSpanning();
                }
            })
            .rebalance()
            .output(new DiscardingOutputFormat<ConsumesTooLittleSpanning>());

        env.execute();

        // Without this fail() the test would silently pass if the broken
        // serializer is not detected and the job completes normally.
        fail("This job should have failed due to broken serialization.");
    } catch (ProgramInvocationException e) {
        // Guard the cause chain so a missing cause surfaces as an assertion
        // failure instead of a NullPointerException masking the real problem.
        assertNotNull(e.getCause());
        assertNotNull(e.getCause().getCause());
        Throwable rootCause = e.getCause().getCause();
        assertTrue(rootCause instanceof IOException);
        assertTrue(rootCause.getMessage().contains("broken serialization"));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example usage of org.apache.flink.api.java.ExecutionEnvironment in the Apache Flink project,
taken from class CustomSerializationITCase, method testIncorrectSerializer1.
@Test
public void testIncorrectSerializer1() {
    try {
        // Run against the remote test cluster so serialization actually crosses the wire.
        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARLLELISM);
        env.getConfig().disableSysoutLogging();

        // A value type whose serializer reads more bytes than it wrote:
        // the runtime is expected to detect this and abort the job.
        env.generateSequence(1, 10 * PARLLELISM)
            .map(new MapFunction<Long, ConsumesTooMuch>() {
                @Override
                public ConsumesTooMuch map(Long value) throws Exception {
                    return new ConsumesTooMuch();
                }
            })
            .rebalance()
            .output(new DiscardingOutputFormat<ConsumesTooMuch>());

        env.execute();

        // Without this fail() the test would silently pass if the broken
        // serializer is not detected and the job completes normally.
        fail("This job should have failed due to broken serialization.");
    } catch (ProgramInvocationException e) {
        // Guard the cause chain so a missing cause surfaces as an assertion
        // failure instead of a NullPointerException masking the real problem.
        assertNotNull(e.getCause());
        assertNotNull(e.getCause().getCause());
        Throwable rootCause = e.getCause().getCause();
        assertTrue(rootCause instanceof IOException);
        assertTrue(rootCause.getMessage().contains("broken serialization"));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example usage of org.apache.flink.api.java.ExecutionEnvironment in the Apache Flink project,
taken from class CustomSerializationITCase, method testIncorrectSerializer3.
@Test
public void testIncorrectSerializer3() {
    try {
        // Run against the remote test cluster so serialization actually crosses the wire.
        ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(PARLLELISM);
        env.getConfig().disableSysoutLogging();

        // A value type whose serializer reads fewer bytes than it wrote:
        // the runtime is expected to detect this and abort the job.
        env.generateSequence(1, 10 * PARLLELISM)
            .map(new MapFunction<Long, ConsumesTooLittle>() {
                @Override
                public ConsumesTooLittle map(Long value) throws Exception {
                    return new ConsumesTooLittle();
                }
            })
            .rebalance()
            .output(new DiscardingOutputFormat<ConsumesTooLittle>());

        env.execute();

        // Without this fail() the test would silently pass if the broken
        // serializer is not detected and the job completes normally.
        fail("This job should have failed due to broken serialization.");
    } catch (ProgramInvocationException e) {
        // Guard the cause chain so a missing cause surfaces as an assertion
        // failure instead of a NullPointerException masking the real problem.
        assertNotNull(e.getCause());
        assertNotNull(e.getCause().getCause());
        Throwable rootCause = e.getCause().getCause();
        assertTrue(rootCause instanceof IOException);
        assertTrue(rootCause.getMessage().contains("broken serialization"));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example usage of org.apache.flink.api.java.ExecutionEnvironment in the Apache Flink project,
taken from class MiscellaneousIssuesITCase, method testNullValues.
@Test
public void testNullValues() {
    try {
        // Single-parallelism remote job whose mapper emits only nulls; the
        // runtime must reject null records with a NullPointerException.
        final ExecutionEnvironment env =
                ExecutionEnvironment.createRemoteEnvironment("localhost", cluster.getLeaderRPCPort());
        env.setParallelism(1);
        env.getConfig().disableSysoutLogging();

        DataSet<String> nulledOut = env.fromElements("hallo")
                .map(new MapFunction<String, String>() {
                    @Override
                    public String map(String value) throws Exception {
                        return null;
                    }
                });
        nulledOut.writeAsText("/tmp/myTest", FileSystem.WriteMode.OVERWRITE);

        try {
            env.execute();
            fail("this should fail due to null values.");
        } catch (ProgramInvocationException pie) {
            // The NPE is expected two levels down the cause chain.
            assertNotNull(pie.getCause());
            assertNotNull(pie.getCause().getCause());
            assertTrue(pie.getCause().getCause() instanceof NullPointerException);
        }
    } catch (Exception unexpected) {
        unexpected.printStackTrace();
        fail(unexpected.getMessage());
    }
}
Example usage of org.apache.flink.api.java.ExecutionEnvironment in the Apache Flink project,
taken from class MultipleJoinsWithSolutionSetCompilerTest, method testMultiSolutionSetJoinPlan.
@Test
public void testMultiSolutionSetJoinPlan() {
    try {
        // Build a plan with two joins against the solution set and verify
        // the optimizer's strategy and ship-strategy choices for each join.
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        @SuppressWarnings("unchecked")
        DataSet<Tuple2<Long, Double>> source = env.fromElements(new Tuple2<Long, Double>(1L, 1.0));

        DataSet<Tuple2<Long, Double>> iterationResult = constructPlan(source, 10);

        // add two sinks, to test the case of branching after an iteration
        iterationResult.output(new DiscardingOutputFormat<Tuple2<Long, Double>>());
        iterationResult.output(new DiscardingOutputFormat<Tuple2<Long, Double>>());

        Plan plan = env.createProgramPlan();
        OptimizedPlan optimized = compileNoStats(plan);
        OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(optimized);

        DualInputPlanNode firstJoin = resolver.getNode(JOIN_1);
        DualInputPlanNode secondJoin = resolver.getNode(JOIN_2);

        // Each join must hash-build on its solution-set side and hash-partition
        // the probe side; the solution set feeds the build input directly.
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_FIRST, firstJoin.getDriverStrategy());
        assertEquals(DriverStrategy.HYBRIDHASH_BUILD_SECOND, secondJoin.getDriverStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, firstJoin.getInput2().getShipStrategy());
        assertEquals(ShipStrategyType.PARTITION_HASH, secondJoin.getInput1().getShipStrategy());
        assertEquals(SolutionSetPlanNode.class, firstJoin.getInput1().getSource().getClass());
        assertEquals(SolutionSetPlanNode.class, secondJoin.getInput2().getSource().getClass());

        // Finally make sure the optimized plan translates to a job graph.
        new JobGraphGenerator().compileJobGraph(optimized);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test erroneous: " + e.getMessage());
    }
}
Aggregations