Use of org.junit.Assert.assertThat in project knime-core by knime.
From the class DefaultNodeProgressMonitorTest, method internalTestManyMessageEvents.
/**
* A lot of incremental numeric progress updates + many message events
* Previously, this took significantly longer due to expensive string construction.
*/
private void internalTestManyMessageEvents(final NodeProgressMonitor toMonitor,
    final NodeProgressMonitor toControl) throws Exception {
    final int parts = 1000000;
    final MutableLong stringComposeCounter = new MutableLong();
    Function<Integer, String> msgFct = (index) -> {
        stringComposeCounter.increment();
        return "Row " + index + " (Row \"" + RowKey.createRowKey((long) index) + "\")";
    };
    final Pointer<NodeProgress> progressPointer = new Pointer<>();
    String lastExpectedMsg = msgFct.apply(parts);
    final Function<NodeProgress, Boolean> isLastEventFunction = p -> p.getMessage().equals(lastExpectedMsg);
    NodeProgressListener l = createListener(progressPointer, isLastEventFunction);
    toMonitor.addProgressListener(l);
    try {
        for (int i = 1; i < parts + 1; i++) {
            final int index = i;
            // if this line is replaced by a direct string composition, the test takes an order of magnitude longer
            toControl.setProgress(i / (double) parts, () -> msgFct.apply(index));
        }
        synchronized (isLastEventFunction) {
            isLastEventFunction.wait(500);
        }
        assertThat(progressPointer.get().getProgress(), is(closeTo(1.0, PROG_EPSILON)));
        assertThat(progressPointer.get().getMessage(), is(equalTo(lastExpectedMsg)));
        // the lazy string creation should be invoked at most 4 times per second;
        // it must run at least twice: once for the reference string and once during an event
        // 2020-01-08, BW: increased max=5 to max=8 -- encountered a longer running test case on Win Server
        Assert.assertThat(stringComposeCounter.getValue(),
            is(allOf(greaterThanOrEqualTo(2L), lessThanOrEqualTo(8L))));
    } finally {
        toMonitor.removeProgressListener(l);
    }
}
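The final assertion on stringComposeCounter relies on two properties of the monitor: it evaluates the message Supplier lazily and it throttles how often it emits events. The following is a minimal sketch of that pattern, assuming a hypothetical ThrottledProgressReporter class (this is not the KNIME NodeProgressMonitor API); the expensive string is composed only when an event is actually emitted, so almost all of the one million suppliers are never invoked.

import java.util.function.Supplier;

// Hypothetical sketch, not KNIME code: emit at most ~4 progress events per second
// and build the message string only for the events that are actually emitted.
final class ThrottledProgressReporter {

    private static final long MIN_INTERVAL_NANOS = 250_000_000L; // ~4 events per second

    private boolean emittedOnce;
    private long lastEmitNanos;

    void setProgress(final double progress, final Supplier<String> message) {
        final long now = System.nanoTime();
        if (!emittedOnce || now - lastEmitNanos >= MIN_INTERVAL_NANOS) {
            emittedOnce = true;
            lastEmitNanos = now;
            // the message is composed only here -- Supplier.get() is rarely called
            System.out.printf("%3.0f%% - %s%n", progress * 100, message.get());
        }
    }

    public static void main(final String[] args) {
        final ThrottledProgressReporter reporter = new ThrottledProgressReporter();
        for (int i = 1; i <= 1_000_000; i++) {
            final int index = i;
            // the lambda defers the string concatenation, mirroring the test above
            reporter.setProgress(i / 1_000_000.0, () -> "Row " + index);
        }
    }
}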
Use of org.junit.Assert.assertThat in project flink by apache.
From the class JobGraphGeneratorTest, method testGeneratingJobGraphWithUnconsumedResultPartition.
@Test
public void testGeneratingJobGraphWithUnconsumedResultPartition() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple2<Long, Long>> input = env.fromElements(new Tuple2<>(1L, 2L)).setParallelism(1);

    DataSet<Tuple2<Long, Long>> ds = input.map(new IdentityMapper<>()).setParallelism(3);

    AbstractID intermediateDataSetID = new AbstractID();

    // this output branch will be excluded.
    ds.output(BlockingShuffleOutputFormat.createOutputFormat(intermediateDataSetID)).setParallelism(1);

    // this is the normal output branch.
    ds.output(new DiscardingOutputFormat<>()).setParallelism(1);

    JobGraph jobGraph = compileJob(env);

    Assert.assertEquals(3, jobGraph.getVerticesSortedTopologicallyFromSources().size());

    JobVertex mapVertex = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    Assert.assertThat(mapVertex, Matchers.instanceOf(JobVertex.class));

    // the map vertex produces 2 result data sets; one of them is of type ResultPartitionType.BLOCKING_PERSISTENT
    Assert.assertEquals(2, mapVertex.getProducedDataSets().size());
    Assert.assertTrue(mapVertex.getProducedDataSets().stream()
        .anyMatch(dataSet -> dataSet.getId().equals(new IntermediateDataSetID(intermediateDataSetID))
            && dataSet.getResultType() == ResultPartitionType.BLOCKING_PERSISTENT));
}
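Both snippets use the Hamcrest-backed org.junit.Assert.assertThat overload with composed matchers. The following self-contained JUnit 4 sketch (AssertThatIdiomsTest is illustrative and belongs to neither project) collects the idioms seen above: closeTo for a floating-point tolerance, allOf for a bounded range, and instanceOf for a runtime type check.

import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.junit.Assert.assertThat;

import org.junit.Test;

public class AssertThatIdiomsTest {

    @Test
    public void matcherCombinations() {
        double progress = 0.999999;   // e.g. the final progress value of a monitor
        long invocations = 3L;        // e.g. how often a lazy message supplier ran
        String vertexName = "mapper"; // stands in for the JobVertex checked above

        // floating-point comparison with an explicit tolerance, as in the KNIME test
        assertThat(progress, is(closeTo(1.0, 1e-5)));

        // bounded range check combining two matchers with allOf
        assertThat(invocations, is(allOf(greaterThanOrEqualTo(2L), lessThanOrEqualTo(8L))));

        // runtime type check, as in the Flink map-vertex assertion
        assertThat(vertexName, instanceOf(CharSequence.class));
    }
}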