Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.
Class NonReusingHashJoinIteratorITCase, method testBuildFirstWithHighNumberOfCommonKeys:
@Test
public void testBuildFirstWithHighNumberOfCommonKeys() {
    // the size of the left and right inputs
    final int INPUT_1_SIZE = 200;
    final int INPUT_2_SIZE = 100;
    final int INPUT_1_DUPLICATES = 10;
    final int INPUT_2_DUPLICATES = 2000;
    final int DUPLICATE_KEY = 13;
    try {
        TupleGenerator generator1 = new TupleGenerator(SEED1, 500, 4096, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        TupleGenerator generator2 = new TupleGenerator(SEED2, 500, 2048, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
        final TestData.TupleGeneratorIterator gen1Iter = new TestData.TupleGeneratorIterator(generator1, INPUT_1_SIZE);
        final TestData.TupleGeneratorIterator gen2Iter = new TestData.TupleGeneratorIterator(generator2, INPUT_2_SIZE);
        final TestData.TupleConstantValueIterator const1Iter = new TestData.TupleConstantValueIterator(DUPLICATE_KEY, "LEFT String for Duplicate Keys", INPUT_1_DUPLICATES);
        final TestData.TupleConstantValueIterator const2Iter = new TestData.TupleConstantValueIterator(DUPLICATE_KEY, "RIGHT String for Duplicate Keys", INPUT_2_DUPLICATES);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList1 = new ArrayList<>();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        final List<MutableObjectIterator<Tuple2<Integer, String>>> inList2 = new ArrayList<>();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        MutableObjectIterator<Tuple2<Integer, String>> input1 = new UnionIterator<>(inList1);
        MutableObjectIterator<Tuple2<Integer, String>> input2 = new UnionIterator<>(inList2);
        // collect expected data
        final Map<Integer, Collection<TupleMatch>> expectedMatchesMap = joinTuples(collectTupleData(input1), collectTupleData(input2));
        // re-create the whole thing for actual processing:
        // reset the generators and iterators
        generator1.reset();
        generator2.reset();
        const1Iter.reset();
        const2Iter.reset();
        gen1Iter.reset();
        gen2Iter.reset();
        inList1.clear();
        inList1.add(gen1Iter);
        inList1.add(const1Iter);
        inList2.clear();
        inList2.add(gen2Iter);
        inList2.add(const2Iter);
        input1 = new UnionIterator<>(inList1);
        input2 = new UnionIterator<>(inList2);
        final TupleMatchRemovingJoin matcher = new TupleMatchRemovingJoin(expectedMatchesMap);
        final Collector<Tuple2<Integer, String>> collector = new DiscardingOutputCollector<>();
        NonReusingBuildFirstHashJoinIterator<Tuple2<Integer, String>, Tuple2<Integer, String>, Tuple2<Integer, String>> iterator = new NonReusingBuildFirstHashJoinIterator<>(input1, input2, this.recordSerializer, this.record1Comparator, this.recordSerializer, this.record2Comparator, this.recordPairComparator, this.memoryManager, ioManager, this.parentTask, 1.0, false, false, true);
        iterator.open();
        while (iterator.callWithNextKey(matcher, collector)) ;
        iterator.close();
        // assert that each expected match was seen
        for (Entry<Integer, Collection<TupleMatch>> entry : expectedMatchesMap.entrySet()) {
            if (!entry.getValue().isEmpty()) {
                Assert.fail("Collection for key " + entry.getKey() + " is not empty");
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("An exception occurred during the test: " + e.getMessage());
    }
}
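The test above feeds the join entirely through MutableObjectIterator instances and concatenates them with UnionIterator. For reference, here is a minimal sketch of the contract such an input must satisfy, written as a hypothetical ListBackedIterator adapter (not part of Flink): both next() variants return null once the input is exhausted, which is how the join and the collection loops above detect the end of input.

import org.apache.flink.util.MutableObjectIterator;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;

// Hypothetical adapter from a List to the MutableObjectIterator contract.
// Exhaustion is signaled by returning null, not by a hasNext() method.
public class ListBackedIterator<E> implements MutableObjectIterator<E> {
    private final Iterator<E> source;

    public ListBackedIterator(List<E> elements) {
        this.source = elements.iterator();
    }

    @Override
    public E next(E reuse) throws IOException {
        // This simple adapter ignores the reuse object and returns a fresh element;
        // the contract merely permits, but does not require, filling the reuse object.
        return next();
    }

    @Override
    public E next() throws IOException {
        return source.hasNext() ? source.next() : null;
    }
}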
Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.
Class IterationHeadTask, method run:
@Override
public void run() throws Exception {
    final String brokerKey = brokerKey();
    final int workerIndex = getEnvironment().getTaskInfo().getIndexOfThisSubtask();
    final boolean objectSolutionSet = config.isSolutionSetUnmanaged();
    // used in case of a workset iteration
    CompactingHashTable<X> solutionSet = null;
    // used in case of a workset iteration with an unmanaged solution set
    JoinHashMap<X> solutionSetObjectMap = null;
    boolean waitForSolutionSetUpdate = config.getWaitForSolutionSetUpdate();
    boolean isWorksetIteration = config.getIsWorksetIteration();
    try {
        /* used for receiving the current iteration result from the iteration tail */
        SuperstepKickoffLatch nextStepKickoff = new SuperstepKickoffLatch();
        SuperstepKickoffLatchBroker.instance().handIn(brokerKey, nextStepKickoff);
        BlockingBackChannel backChannel = initBackChannel();
        SuperstepBarrier barrier = initSuperstepBarrier();
        SolutionSetUpdateBarrier solutionSetUpdateBarrier = null;
        feedbackDataInput = config.getIterationHeadPartialSolutionOrWorksetInputIndex();
        feedbackTypeSerializer = this.getInputSerializer(feedbackDataInput);
        excludeFromReset(feedbackDataInput);
        int initialSolutionSetInput;
        if (isWorksetIteration) {
            initialSolutionSetInput = config.getIterationHeadSolutionSetInputIndex();
            solutionTypeSerializer = config.getSolutionSetSerializer(getUserCodeClassLoader());
            // set up the index for the solution set
            @SuppressWarnings("unchecked")
            MutableObjectIterator<X> solutionSetInput = (MutableObjectIterator<X>) createInputIterator(inputReaders[initialSolutionSetInput], solutionTypeSerializer);
            // read the initial solution set
            if (objectSolutionSet) {
                solutionSetObjectMap = initJoinHashMap();
                readInitialSolutionSet(solutionSetObjectMap, solutionSetInput);
                SolutionSetBroker.instance().handIn(brokerKey, solutionSetObjectMap);
            } else {
                solutionSet = initCompactingHashTable();
                readInitialSolutionSet(solutionSet, solutionSetInput);
                SolutionSetBroker.instance().handIn(brokerKey, solutionSet);
            }
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier = new SolutionSetUpdateBarrier();
                SolutionSetUpdateBarrierBroker.instance().handIn(brokerKey, solutionSetUpdateBarrier);
            }
        } else {
            // bulk iteration case
            @SuppressWarnings("unchecked")
            TypeSerializerFactory<X> solSer = (TypeSerializerFactory<X>) feedbackTypeSerializer;
            solutionTypeSerializer = solSer;
            // waitForSolutionSetUpdate here indicates that a termination criterion tail exists
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier = new SolutionSetUpdateBarrier();
                SolutionSetUpdateBarrierBroker.instance().handIn(brokerKey, solutionSetUpdateBarrier);
            }
        }
        // instantiate all aggregators and register them at the iteration global registry
        RuntimeAggregatorRegistry aggregatorRegistry = new RuntimeAggregatorRegistry(config.getIterationAggregators(getUserCodeClassLoader()));
        IterationAggregatorBroker.instance().handIn(brokerKey, aggregatorRegistry);
        DataInputView superstepResult = null;
        while (this.running && !terminationRequested()) {
            if (log.isInfoEnabled()) {
                log.info(formatLogString("starting iteration [" + currentIteration() + "]"));
            }
            barrier.setup();
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier.setup();
            }
            if (!inFirstIteration()) {
                feedBackSuperstepResult(superstepResult);
            }
            super.run();
            // signal to connected tasks that we are done with the superstep
            sendEndOfSuperstepToAllIterationOutputs();
            if (waitForSolutionSetUpdate) {
                solutionSetUpdateBarrier.waitForSolutionSetUpdate();
            }
            // blocking call to wait for the result
            superstepResult = backChannel.getReadEndAfterSuperstepEnded();
            if (log.isInfoEnabled()) {
                log.info(formatLogString("finishing iteration [" + currentIteration() + "]"));
            }
            sendEventToSync(new WorkerDoneEvent(workerIndex, aggregatorRegistry.getAllAggregators()));
            if (log.isInfoEnabled()) {
                log.info(formatLogString("waiting for other workers in iteration [" + currentIteration() + "]"));
            }
            barrier.waitForOtherWorkers();
            if (barrier.terminationSignaled()) {
                if (log.isInfoEnabled()) {
                    log.info(formatLogString("head received termination request in iteration [" + currentIteration() + "]"));
                }
                requestTermination();
                nextStepKickoff.signalTermination();
            } else {
                incrementIterationCounter();
                String[] globalAggregateNames = barrier.getAggregatorNames();
                Value[] globalAggregates = barrier.getAggregates();
                aggregatorRegistry.updateGlobalAggregatesAndReset(globalAggregateNames, globalAggregates);
                nextStepKickoff.triggerNextSuperstep();
            }
        }
        if (log.isInfoEnabled()) {
            log.info(formatLogString("streaming out final result after [" + currentIteration() + "] iterations"));
        }
        if (isWorksetIteration) {
            if (objectSolutionSet) {
                streamSolutionSetToFinalOutput(solutionSetObjectMap);
            } else {
                streamSolutionSetToFinalOutput(solutionSet);
            }
        } else {
            streamOutFinalOutputBulk(new InputViewIterator<X>(superstepResult, this.solutionTypeSerializer.getSerializer()));
        }
        this.finalOutputCollector.close();
    } finally {
        // make sure we unregister everything from the brokers:
        // - back channel
        // - aggregator registry
        // - solution set index
        IterationAggregatorBroker.instance().remove(brokerKey);
        BlockingBackChannelBroker.instance().remove(brokerKey);
        SuperstepKickoffLatchBroker.instance().remove(brokerKey);
        SolutionSetBroker.instance().remove(brokerKey);
        SolutionSetUpdateBarrierBroker.instance().remove(brokerKey);
        if (solutionSet != null) {
            solutionSet.close();
        }
    }
}
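Much of the coordination above runs through broker singletons (SolutionSetBroker, SuperstepKickoffLatchBroker, and so on): the head task hands shared objects in under a per-iteration brokerKey, sibling tasks of the same iteration pick them up, and the finally block removes everything again. Below is a simplified sketch of that hand-in/remove pattern; it is a stand-in for illustration only, not Flink's actual Broker class, which additionally blocks in its retrieval method until the object has been handed in.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Simplified broker: a keyed rendezvous point for sharing objects between
// tasks that know the same key. Hypothetical class, not Flink's Broker.
public class SimpleBroker<V> {
    private final ConcurrentMap<String, V> mediations = new ConcurrentHashMap<>();

    public void handIn(String key, V obj) {
        if (mediations.putIfAbsent(key, obj) != null) {
            throw new IllegalStateException("duplicate hand-in for key " + key);
        }
    }

    public V get(String key) {
        V obj = mediations.get(key);
        if (obj == null) {
            // Flink's real broker would block here until handIn() happens;
            // this sketch simply fails fast.
            throw new IllegalStateException("nothing handed in under key " + key);
        }
        return obj;
    }

    public void remove(String key) {
        mediations.remove(key);
    }
}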
Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.
Class HashTableITCase, method testFailingHashJoinTooManyRecursions:
/*
 * This test is basically identical to the "testSpillingHashJoinWithMassiveCollisions" test, only that the number
 * of repeated values (causing bucket collisions) is large enough to make sure that their target partition no longer
 * fits into memory by itself and needs to be repartitioned again in the recursion.
 */
@Test
public void testFailingHashJoinTooManyRecursions() throws IOException {
    // The following two values are known to have a hash-code collision on the first
    // recursion level. We use them to make sure one partition grows disproportionately large.
    final int REPEATED_VALUE_1 = 40559;
    final int REPEATED_VALUE_2 = 92882;
    final int REPEATED_VALUE_COUNT = 3000000;
    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;
    // create a build input that gives 3 million pairs with 3 values sharing the same key,
    // plus 6 million pairs spread over the two heavily repeated keys (3 million each)
    MutableObjectIterator<Record> build1 = new UniformRecordGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);
    MutableObjectIterator<Record> build2 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<Record> build3 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<Record>> builds = new ArrayList<MutableObjectIterator<Record>>();
    builds.add(build1);
    builds.add(build2);
    builds.add(build3);
    MutableObjectIterator<Record> buildInput = new UnionIterator<Record>(builds);
    // create a probe input that gives 10 million pairs with 10 values sharing a key,
    // plus another 6 million pairs for the two repeated keys
    MutableObjectIterator<Record> probe1 = new UniformRecordGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);
    MutableObjectIterator<Record> probe2 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<Record> probe3 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<Record>> probes = new ArrayList<MutableObjectIterator<Record>>();
    probes.add(probe1);
    probes.add(probe2);
    probes.add(probe3);
    MutableObjectIterator<Record> probeInput = new UnionIterator<Record>(probes);
    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }
    // ----------------------------------------------------------------------------------------
    final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(this.recordBuildSideAccesssor, this.recordProbeSideAccesssor, this.recordBuildSideComparator, this.recordProbeSideComparator, this.pactRecordComparator, memSegments, ioManager);
    join.open(buildInput, probeInput);
    final Record recordReuse = new Record();
    try {
        while (join.nextRecord()) {
            MutableObjectIterator<Record> buildSide = join.getBuildSideIterator();
            if (buildSide.next(recordReuse) == null) {
                fail("No build side values found for a probe key.");
            }
            while (buildSide.next(recordReuse) != null) ;
        }
        fail("Hash join must have failed due to too many recursions.");
    } catch (Exception ex) {
        // expected
    }
    join.close();
    // ----------------------------------------------------------------------------------------
    this.memManager.release(join.getFreedMemory());
}
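The skew that forces the extra recursion comes from the ConstantsKeyValuePairsIterator inputs, which emit millions of records under a single key. A sketch of such a constant-key generator is below, as a hypothetical ConstantKeyIterator (the actual test utility may differ in detail); it also illustrates how the reuse-object variant of next() is typically implemented for Record inputs.

import org.apache.flink.types.IntValue;
import org.apache.flink.types.Record;
import org.apache.flink.util.MutableObjectIterator;

// Hypothetical generator: emits `count` records that all carry the same
// key and value, which is what makes one hash partition grow without bound.
public class ConstantKeyIterator implements MutableObjectIterator<Record> {
    private final IntValue key;
    private final IntValue value;
    private int remaining;

    public ConstantKeyIterator(int key, int value, int count) {
        this.key = new IntValue(key);
        this.value = new IntValue(value);
        this.remaining = count;
    }

    @Override
    public Record next(Record reuse) {
        if (remaining-- <= 0) {
            return null; // exhausted
        }
        reuse.setField(0, key);   // the duplicated key in field 0
        reuse.setField(1, value); // the constant payload in field 1
        return reuse;
    }

    @Override
    public Record next() {
        return next(new Record(2));
    }
}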
Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.
Class CompactingHashTableTest, method testHashTableGrowthWithInsertOrReplace:
/**
 * This test validates that records are not lost via "insertOrReplace()" as in bug [FLINK-2361].
 *
 * <p>This has to be duplicated in InPlaceMutableHashTableTest and CompactingHashTableTest
 * because of the different constructor calls.
 */
@Test
public void testHashTableGrowthWithInsertOrReplace() {
    try {
        final int numElements = 1000000;
        List<MemorySegment> memory = getMemory(10000, 32 * 1024);
        // We create a hash table that thinks the records are super large. That makes it
        // initially choose a lot of memory for the partition buffers and start with a
        // smaller hash table, so we trigger a hash table growth early.
        CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<>(tuple2LongStringSerializer, tuple2LongStringComparator, memory, 10000);
        table.open();
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(new Tuple2<Long, String>(i, String.valueOf(i)));
        }
        // make sure that all elements are contained via the entry iterator
        {
            BitSet bitSet = new BitSet(numElements);
            MutableObjectIterator<Tuple2<Long, String>> iter = table.getEntryIterator();
            Tuple2<Long, String> next;
            while ((next = iter.next()) != null) {
                assertNotNull(next.f0);
                assertNotNull(next.f1);
                assertEquals(next.f0.longValue(), Long.parseLong(next.f1));
                bitSet.set(next.f0.intValue());
            }
            assertEquals(numElements, bitSet.cardinality());
        }
        // make sure all entries are contained via the prober
        {
            CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Long> prober = table.getProber(probeComparator, pairComparator);
            for (long i = 0; i < numElements; i++) {
                assertNotNull(prober.getMatchFor(i));
                assertNull(prober.getMatchFor(i + numElements));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
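The entry-iterator check above uses the canonical consumption idiom for MutableObjectIterator: since the interface has no hasNext(), the loop assigns and null-tests in a single expression. That idiom can be factored into a small generic helper, sketched here as a hypothetical IteratorDrain utility (not part of Flink):

import org.apache.flink.util.MutableObjectIterator;
import java.io.IOException;
import java.util.function.Consumer;

// Hypothetical helper: applies an action to every remaining element and
// returns how many elements were consumed.
public final class IteratorDrain {
    private IteratorDrain() {}

    public static <E> long forEach(MutableObjectIterator<E> iter, Consumer<E> action) throws IOException {
        long count = 0;
        E next;
        while ((next = iter.next()) != null) {
            action.accept(next);
            count++;
        }
        return count;
    }
}

With such a helper, the bit-set pass above could be written as IteratorDrain.forEach(table.getEntryIterator(), t -> bitSet.set(t.f0.intValue())).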
Use of org.apache.flink.util.MutableObjectIterator in project flink by apache.
Class AbstractBinaryExternalMerger, method getMergingIterator:
/**
 * Returns an iterator that iterates over the merged result from all given channels.
 *
 * @param channelIDs The channels that are to be merged and returned.
 * @param openChannels The list that collects the readers opened for each channel, so that the caller can close them later.
 * @return An iterator over the merged records of the input channels.
 * @throws IOException Thrown, if the readers encounter an I/O problem.
 */
public BinaryMergeIterator<Entry> getMergingIterator(List<ChannelWithMeta> channelIDs, List<FileIOChannel> openChannels) throws IOException {
    // create one iterator per channel id
    if (LOG.isDebugEnabled()) {
        LOG.debug("Performing merge of " + channelIDs.size() + " sorted streams.");
    }
    final List<MutableObjectIterator<Entry>> iterators = new ArrayList<>(channelIDs.size() + 1);
    for (ChannelWithMeta channel : channelIDs) {
        AbstractChannelReaderInputView view = FileChannelUtil.createInputView(ioManager, channel, openChannels, compressionEnable, compressionCodecFactory, compressionBlockSize, pageSize);
        iterators.add(channelReaderInputViewIterator(view));
    }
    return new BinaryMergeIterator<>(iterators, mergeReusedEntries(channelIDs.size()), mergeComparator());
}
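BinaryMergeIterator performs a k-way merge over the per-channel iterators created above. As a rough illustration of what such a merge does, here is a minimal heap-based merge over sorted MutableObjectIterators; it is a simplified sketch (no object reuse, one heap entry per input stream) and not Flink's actual BinaryMergeIterator implementation.

import org.apache.flink.util.MutableObjectIterator;
import java.io.IOException;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Hypothetical k-way merge: assumes each input iterator is already sorted
// according to the given comparator, and yields a globally sorted stream.
public class SimpleMergeIterator<E> implements MutableObjectIterator<E> {

    private static final class Head<E> {
        E value;
        final MutableObjectIterator<E> source;
        Head(E value, MutableObjectIterator<E> source) {
            this.value = value;
            this.source = source;
        }
    }

    private final PriorityQueue<Head<E>> heap;

    public SimpleMergeIterator(List<MutableObjectIterator<E>> inputs, Comparator<E> comparator) throws IOException {
        this.heap = new PriorityQueue<>(Math.max(1, inputs.size()),
                Comparator.comparing((Head<E> h) -> h.value, comparator));
        // prime the heap with the first element of every non-empty input
        for (MutableObjectIterator<E> input : inputs) {
            E first = input.next();
            if (first != null) {
                heap.add(new Head<>(first, input));
            }
        }
    }

    @Override
    public E next() throws IOException {
        Head<E> smallest = heap.poll();
        if (smallest == null) {
            return null; // all inputs exhausted
        }
        E result = smallest.value;
        // advance the stream we just consumed from and re-insert it if non-empty
        E following = smallest.source.next();
        if (following != null) {
            smallest.value = following;
            heap.add(smallest);
        }
        return result;
    }

    @Override
    public E next(E reuse) throws IOException {
        return next(); // the reuse object is ignored in this simplified sketch
    }
}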