Use of org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.TupleMatch in project flink by apache.
The class ReusingReOpenableHashTableITCase, method doTest.
protected void doTest(TestData.TupleGeneratorIterator buildInput, TestData.TupleGeneratorIterator probeInput, TupleGenerator bgen, TupleGenerator pgen) throws Exception {
    // collect the expected join result: key -> all matches expected for that key
    final Map<Integer, Collection<TupleMatch>> expectedFirstMatchesMap = joinTuples(collectTupleData(buildInput), collectTupleData(probeInput));
    // each probe pass gets its own deep copy of the expected matches, because the matchers mutate their maps
    final List<Map<Integer, Collection<TupleMatch>>> expectedNMatchesMapList = new ArrayList<>(NUM_PROBES);
    final FlatJoinFunction[] nMatcher = new TupleMatchRemovingJoin[NUM_PROBES];
    for (int i = 0; i < NUM_PROBES; i++) {
        Map<Integer, Collection<TupleMatch>> tmp;
        expectedNMatchesMapList.add(tmp = deepCopy(expectedFirstMatchesMap));
        nMatcher[i] = new TupleMatchRemovingJoin(tmp);
    }
    final FlatJoinFunction firstMatcher = new TupleMatchRemovingJoin(expectedFirstMatchesMap);
    final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();
    // reset the generators and input iterators
    bgen.reset();
    pgen.reset();
    buildInput.reset();
    probeInput.reset();
    // compare the iterator's results against the expected matches
    ReusingBuildFirstReOpenableHashJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator =
            new ReusingBuildFirstReOpenableHashJoinIterator<>(
                    buildInput, probeInput,
                    this.recordSerializer, this.record1Comparator,
                    this.recordSerializer, this.record2Comparator,
                    this.recordPairComparator, this.memoryManager, ioManager,
                    this.parentTask, 1.0, false, false, true);
    iterator.open();
    // do the first join with both inputs
    while (iterator.callWithNextKey(firstMatcher, collector)) ;
    // the matcher removed every match it produced, so anything left over was never seen
    for (Entry<Integer, Collection<TupleMatch>> entry : expectedFirstMatchesMap.entrySet()) {
        if (!entry.getValue().isEmpty()) {
            Assert.fail("Collection for key " + entry.getKey() + " is not empty");
        }
    }
    for (int i = 0; i < NUM_PROBES; i++) {
        pgen.reset();
        probeInput.reset();
        // re-open the probe side with the reset probe input ...
        iterator.reopenProbe(probeInput);
        // ... and run the join again against the unchanged build side
        while (iterator.callWithNextKey(nMatcher[i], collector)) ;
        // again, every expected match must have been produced and removed
        for (Entry<Integer, Collection<TupleMatch>> entry : expectedNMatchesMapList.get(i).entrySet()) {
            if (!entry.getValue().isEmpty()) {
                Assert.fail("Collection for key " + entry.getKey() + " is not empty");
            }
        }
    }
    iterator.close();
}
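The verification trick here is match removal: the join function crosses each produced pair off the expected-matches map, so an empty map afterwards proves that every expected match was produced. Below is a minimal hypothetical sketch of such a function, assuming a TupleMatch(String, String) constructor with value-based equals; the actual TupleMatchRemovingJoin in NonReusingHashJoinIteratorITCase may differ in its details.

    // Hypothetical sketch, not the actual Flink test class: removes each
    // produced match from the expected map and fails on unexpected pairs.
    static final class MatchRemovingJoin implements FlatJoinFunction<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> {

        private final Map<Integer, Collection<TupleMatch>> toRemoveFrom;

        MatchRemovingJoin(Map<Integer, Collection<TupleMatch>> toRemoveFrom) {
            this.toRemoveFrom = toRemoveFrom;
        }

        @Override
        public void join(Tuple2<Integer, String> first, Tuple2<Integer, String> second, Collector<Tuple2<Integer, String>> out) {
            Collection<TupleMatch> matches = toRemoveFrom.get(first.f0);
            if (matches == null) {
                Assert.fail("Unexpected key in join result: " + first.f0);
            }
            // a pair that cannot be removed was never expected, or was produced twice
            if (!matches.remove(new TupleMatch(first.f1, second.f1))) {
                Assert.fail("Unexpected or duplicate match: " + first + " / " + second);
            }
        }
    }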
Use of org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.TupleMatch in project flink by apache.
The class NonReusingReOpenableHashTableITCase, method doTest.
protected void doTest(TestData.TupleGeneratorIterator buildInput, TestData.TupleGeneratorIterator probeInput, TupleGenerator bgen, TupleGenerator pgen) throws Exception {
    // collect the expected join result: key -> all matches expected for that key
    final Map<Integer, Collection<TupleMatch>> expectedFirstMatchesMap = joinTuples(collectTupleData(buildInput), collectTupleData(probeInput));
    // each probe pass gets its own deep copy of the expected matches, because the matchers mutate their maps
    final List<Map<Integer, Collection<TupleMatch>>> expectedNMatchesMapList = new ArrayList<>(NUM_PROBES);
    final FlatJoinFunction[] nMatcher = new TupleMatchRemovingJoin[NUM_PROBES];
    for (int i = 0; i < NUM_PROBES; i++) {
        Map<Integer, Collection<TupleMatch>> tmp;
        expectedNMatchesMapList.add(tmp = deepCopy(expectedFirstMatchesMap));
        nMatcher[i] = new TupleMatchRemovingJoin(tmp);
    }
    final FlatJoinFunction firstMatcher = new TupleMatchRemovingJoin(expectedFirstMatchesMap);
    final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();
    // reset the generators and input iterators
    bgen.reset();
    pgen.reset();
    buildInput.reset();
    probeInput.reset();
    // compare the iterator's results against the expected matches
    NonReusingBuildFirstReOpenableHashJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator =
            new NonReusingBuildFirstReOpenableHashJoinIterator<>(
                    buildInput, probeInput,
                    this.recordSerializer, this.record1Comparator,
                    this.recordSerializer, this.record2Comparator,
                    this.recordPairComparator, this.memoryManager, ioManager,
                    this.parentTask, 1.0, false, false, true);
    iterator.open();
    // do the first join with both inputs
    while (iterator.callWithNextKey(firstMatcher, collector)) ;
    // the matcher removed every match it produced, so anything left over was never seen
    for (Entry<Integer, Collection<TupleMatch>> entry : expectedFirstMatchesMap.entrySet()) {
        if (!entry.getValue().isEmpty()) {
            Assert.fail("Collection for key " + entry.getKey() + " is not empty");
        }
    }
    for (int i = 0; i < NUM_PROBES; i++) {
        pgen.reset();
        probeInput.reset();
        // re-open the probe side with the reset probe input ...
        iterator.reopenProbe(probeInput);
        // ... and run the join again against the unchanged build side
        while (iterator.callWithNextKey(nMatcher[i], collector)) ;
        // again, every expected match must have been produced and removed
        for (Entry<Integer, Collection<TupleMatch>> entry : expectedNMatchesMapList.get(i).entrySet()) {
            if (!entry.getValue().isEmpty()) {
                Assert.fail("Collection for key " + entry.getKey() + " is not empty");
            }
        }
    }
    iterator.close();
}
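The only difference from the previous listing is the iterator class. The Reusing/NonReusing prefixes refer to Flink's object-reuse modes: a reusing iterator may deserialize into the same record instance on every call, while a non-reusing iterator allocates a fresh record per call. The test inputs implement MutableObjectIterator, which exposes both calling conventions; the following is a minimal sketch of consuming one of them, with illustrative names, not code from the test.

    // Illustrative consumer; 'input' stands for one of the test's build/probe iterators.
    static void consume(MutableObjectIterator<Tuple2<Integer, String>> input) throws Exception {
        // reusing convention: the iterator may overwrite 'reuse' and hand it back,
        // so records must not be held across calls to next(reuse)
        Tuple2<Integer, String> reuse = new Tuple2<>();
        while ((reuse = input.next(reuse)) != null) {
            // use reuse.f0 / reuse.f1 immediately; the next call may overwrite them
        }

        // non-reusing convention (shown on a freshly reset iterator): every call
        // returns a new record that stays valid after subsequent calls
        Tuple2<Integer, String> record;
        while ((record = input.next()) != null) {
            // safe to keep 'record', e.g. add it to a collection
        }
    }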
Use of org.apache.flink.runtime.operators.hash.NonReusingHashJoinIteratorITCase.TupleMatch in project flink by apache.
The class ReOpenableHashTableTestBase, method deepCopy.
static Map<Integer, Collection<TupleMatch>> deepCopy(Map<Integer, Collection<TupleMatch>> expectedSecondMatchesMap) {
    Map<Integer, Collection<TupleMatch>> copy = new HashMap<>(expectedSecondMatchesMap.size());
    for (Map.Entry<Integer, Collection<TupleMatch>> entry : expectedSecondMatchesMap.entrySet()) {
        // copy the match collection so removals in the copy do not affect the original
        List<TupleMatch> matches = new ArrayList<>(entry.getValue());
        copy.put(entry.getKey(), matches);
    }
    return copy;
}
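deepCopy exists because each TupleMatchRemovingJoin removes entries from the map it was given: the NUM_PROBES passes need independent value collections, and a shallow map copy would not provide them. A small sketch of the difference, assuming a hypothetical TupleMatch(String, String) constructor:

    Map<Integer, Collection<TupleMatch>> original = new HashMap<>();
    original.put(1, new ArrayList<>(Collections.singletonList(
            new TupleMatch("a", "b"))));      // hypothetical constructor arguments

    // deep copy: clearing the copied collection leaves the original intact
    Map<Integer, Collection<TupleMatch>> deep = deepCopy(original);
    deep.get(1).clear();
    // original.get(1) still contains one match

    // shallow copy: both maps share the same Collection instances
    Map<Integer, Collection<TupleMatch>> shallow = new HashMap<>(original);
    shallow.get(1).clear();
    // original.get(1) is now empty too; one probe pass would corrupt all the others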