Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
In the class SerializedJobExecutionResultTest, method testSerializationWithNullValues:
@Test
public void testSerializationWithNullValues() {
    try {
        SerializedJobExecutionResult result = new SerializedJobExecutionResult(null, 0L, null);
        SerializedJobExecutionResult cloned = CommonTestUtils.createCopySerializable(result);
        assertNull(cloned.getJobId());
        assertEquals(0L, cloned.getNetRuntime());
        assertNull(cloned.getSerializedAccumulatorResults());

        JobExecutionResult jResult = result.toJobExecutionResult(getClass().getClassLoader());
        assertNull(jResult.getJobID());
        assertTrue(jResult.getAllAccumulatorResults().isEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
In the class SerializedJobExecutionResultTest, method testSerialization:
@Test
public void testSerialization() {
    try {
        final ClassLoader classloader = getClass().getClassLoader();

        JobID origJobId = new JobID();
        long origTime = 65927436589267L;
        Map<String, SerializedValue<Object>> origMap = new HashMap<String, SerializedValue<Object>>();
        origMap.put("name1", new SerializedValue<Object>(723L));
        origMap.put("name2", new SerializedValue<Object>("peter"));
        SerializedJobExecutionResult result = new SerializedJobExecutionResult(origJobId, origTime, origMap);

        // serialize and deserialize the object
        SerializedJobExecutionResult cloned = CommonTestUtils.createCopySerializable(result);
        assertEquals(origJobId, cloned.getJobId());
        assertEquals(origTime, cloned.getNetRuntime());
        assertEquals(origTime, cloned.getNetRuntime(TimeUnit.MILLISECONDS));
        assertEquals(origMap, cloned.getSerializedAccumulatorResults());

        // convert to a deserialized result
        JobExecutionResult jResult = result.toJobExecutionResult(classloader);
        JobExecutionResult jResultCopied = result.toJobExecutionResult(classloader);
        assertEquals(origJobId, jResult.getJobID());
        assertEquals(origJobId, jResultCopied.getJobID());
        assertEquals(origTime, jResult.getNetRuntime());
        assertEquals(origTime, jResult.getNetRuntime(TimeUnit.MILLISECONDS));
        assertEquals(origTime, jResultCopied.getNetRuntime());
        assertEquals(origTime, jResultCopied.getNetRuntime(TimeUnit.MILLISECONDS));
        for (Map.Entry<String, SerializedValue<Object>> entry : origMap.entrySet()) {
            String name = entry.getKey();
            Object value = entry.getValue().deserializeValue(classloader);
            assertEquals(value, jResult.getAccumulatorResult(name));
            assertEquals(value, jResultCopied.getAccumulatorResult(name));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
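The round-trip above hinges on SerializedValue: accumulator values travel as bytes and are only materialized against an explicit classloader, which is why toJobExecutionResult takes one. Below is a minimal, self-contained sketch of that behavior; it is not taken from the Flink sources, the class name SerializedValueSketch and the "peter" payload are illustrative, and only flink-core is assumed on the classpath.

import org.apache.flink.util.SerializedValue;

public class SerializedValueSketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical payload; any Serializable value works the same way.
        SerializedValue<Object> sv = new SerializedValue<Object>("peter");

        // The value stays in byte form until deserializeValue is called
        // with a classloader, mirroring the tests above.
        Object restored = sv.deserializeValue(SerializedValueSketch.class.getClassLoader());
        System.out.println(restored); // prints "peter"
    }
}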
Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
In the class RichInputOutputITCase, method testProgram:
@Override
protected void testProgram() throws Exception {
    // test verifying the number of records read and written vs the accumulator counts
    readCalls = new ConcurrentLinkedQueue<Integer>();
    writeCalls = new ConcurrentLinkedQueue<Integer>();

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.createInput(new TestInputFormat(new Path(inputPath))).output(new TestOutputFormat());
    JobExecutionResult result = env.execute();

    Object a = result.getAllAccumulatorResults().get("DATA_SOURCE_ACCUMULATOR");
    Object b = result.getAllAccumulatorResults().get("DATA_SINK_ACCUMULATOR");
    long recordsRead = (Long) a;
    long recordsWritten = (Long) b;
    assertEquals(recordsRead, readCalls.size());
    assertEquals(recordsWritten, writeCalls.size());
}
Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
In the class SessionWindowITCase, method runTest:
private void runTest(
        SourceFunction<SessionEvent<Integer, TestEventPayload>> dataSource,
        WindowFunction<SessionEvent<Integer, TestEventPayload>, String, Tuple, TimeWindow> windowFunction) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    WindowedStream<SessionEvent<Integer, TestEventPayload>, Tuple, TimeWindow> windowedStream = env
            .addSource(dataSource)
            .keyBy("sessionKey")
            .window(EventTimeSessionWindows.withGap(Time.milliseconds(MAX_SESSION_EVENT_GAP_MS)));
    if (ALLOWED_LATENESS_MS != Long.MAX_VALUE) {
        windowedStream = windowedStream.allowedLateness(Time.milliseconds(ALLOWED_LATENESS_MS));
    }
    if (PURGE_WINDOW_ON_FIRE) {
        windowedStream = windowedStream.trigger(PurgingTrigger.of(EventTimeTrigger.create()));
    }
    windowedStream.apply(windowFunction).print();
    JobExecutionResult result = env.execute();

    // Check that the overall event counts match our expectations. Remember that
    // each late event within the allowed lateness triggers the window again!
    Assert.assertEquals(
            (LATE_EVENTS_PER_SESSION + 1) * NUMBER_OF_SESSIONS * EVENTS_PER_SESSION,
            (long) result.getAccumulatorResult(SESSION_COUNTER_ON_TIME_KEY));
    Assert.assertEquals(
            NUMBER_OF_SESSIONS * (LATE_EVENTS_PER_SESSION * (LATE_EVENTS_PER_SESSION + 1) / 2),
            (long) result.getAccumulatorResult(SESSION_COUNTER_LATE_KEY));
}
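A short worked check of the late-count expectation, taking LATE_EVENTS_PER_SESSION = 3 purely for illustration: the first late event re-fires a window containing 1 late element, the second re-fires one containing 2, and the third one containing 3, so the late counter grows by 1 + 2 + 3 = 3 * (3 + 1) / 2 = 6 per session. That is exactly the LATE_EVENTS_PER_SESSION * (LATE_EVENTS_PER_SESSION + 1) / 2 term in the second assertion, multiplied by NUMBER_OF_SESSIONS.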
Use of org.apache.flink.api.common.JobExecutionResult in project flink by apache.
In the class CollectionTestEnvironment, method execute:
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    JobExecutionResult result = super.execute(jobName);
    this.lastJobExecutionResult = result;
    return result;
}
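Every snippet on this page follows the same consumption pattern: execute() hands back a JobExecutionResult, from which the job id, net runtime, and accumulator values are read. The sketch below distills that pattern into a runnable batch program; it is not taken from the Flink sources, and the job name, the class name JobResultSketch, and the accumulator name "MY_COUNTER" are illustrative.

import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;

public class JobResultSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // A trivial pipeline so that execute() has a sink to run;
        // real jobs would register accumulators inside their functions.
        env.fromElements(1, 2, 3).output(new DiscardingOutputFormat<Integer>());

        JobExecutionResult result = env.execute("job-result-sketch");

        System.out.println("job id:      " + result.getJobID());
        System.out.println("net runtime: " + result.getNetRuntime(TimeUnit.MILLISECONDS) + " ms");

        // Returns null here, since this sketch registers no accumulator
        // under the (hypothetical) name "MY_COUNTER".
        Long counter = result.getAccumulatorResult("MY_COUNTER");
        System.out.println("counter:     " + counter);
    }
}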