
Example 36 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

Class CompactingHashTableTest, method testDoubleResize.

@Test
public void testDoubleResize() {
    // resizing is only supported by CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);
        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();
        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }
        AbstractHashTableProber<IntPair, IntPair> prober = table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Also used : Random(java.util.Random) IntPair(org.apache.flink.runtime.operators.testutils.types.IntPair) MemorySegment(org.apache.flink.core.memory.MemorySegment) Test(org.junit.Test)
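
The tests above obtain their memory through a getMemory(numPages) helper whose body is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming unpooled heap segments of PAGE_SIZE bytes (the allocation strategy shown here is an assumption, not the actual test code):

// Hypothetical sketch of the getMemory(numPages) helper assumed by the tests above.
// PAGE_SIZE is the segment size constant used by the test class.
private static List<MemorySegment> getMemory(int numPages) {
    List<MemorySegment> memory = new ArrayList<>(numPages);
    for (int i = 0; i < numPages; i++) {
        memory.add(MemorySegmentFactory.allocateUnpooledSegment(PAGE_SIZE));
    }
    return memory;
}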

Example 37 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

Class CompactingHashTableTest, method testHashTableGrowthWithInsert.

// ------------------------------------------------------------------------
//  tests
// ------------------------------------------------------------------------
/**
	 * This has to be duplicated in InPlaceMutableHashTableTest and CompactingHashTableTest
	 * because of the different constructor calls.
	 */
@Test
public void testHashTableGrowthWithInsert() {
    try {
        final int numElements = 1000000;
        List<MemorySegment> memory = getMemory(10000, 32 * 1024);
        // We create a hash table that thinks the records are very large. That makes it initially choose
        // a lot of memory for the partition buffers and start with a smaller hash table, so that
        // a hash table growth is triggered early.
        CompactingHashTable<Tuple2<Long, String>> table = new CompactingHashTable<Tuple2<Long, String>>(tuple2LongStringSerializer, tuple2LongStringComparator, memory, 10000);
        table.open();
        for (long i = 0; i < numElements; i++) {
            table.insert(new Tuple2<Long, String>(i, String.valueOf(i)));
        }
        // make sure that all elements are contained via the entry iterator
        {
            BitSet bitSet = new BitSet(numElements);
            MutableObjectIterator<Tuple2<Long, String>> iter = table.getEntryIterator();
            Tuple2<Long, String> next;
            while ((next = iter.next()) != null) {
                assertNotNull(next.f0);
                assertNotNull(next.f1);
                assertEquals(next.f0.longValue(), Long.parseLong(next.f1));
                bitSet.set(next.f0.intValue());
            }
            assertEquals(numElements, bitSet.cardinality());
        }
        // make sure all entries are contained via the prober
        {
            CompactingHashTable<Tuple2<Long, String>>.HashTableProber<Long> prober = table.getProber(probeComparator, pairComparator);
            for (long i = 0; i < numElements; i++) {
                assertNotNull(prober.getMatchFor(i));
                assertNull(prober.getMatchFor(i + numElements));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : MutableObjectIterator(org.apache.flink.util.MutableObjectIterator) BitSet(java.util.BitSet) MemorySegment(org.apache.flink.core.memory.MemorySegment) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Test(org.junit.Test)
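
testHashTableGrowthWithInsert uses a two-argument getMemory(numPages, segmentSize) variant, likewise not shown in this excerpt. A minimal sketch under the same assumption of unpooled heap segments:

// Hypothetical sketch of the two-argument helper assumed by testHashTableGrowthWithInsert.
private static List<MemorySegment> getMemory(int numPages, int segmentSize) {
    List<MemorySegment> memory = new ArrayList<>(numPages);
    for (int i = 0; i < numPages; i++) {
        memory.add(MemorySegmentFactory.allocateUnpooledSegment(segmentSize));
    }
    return memory;
}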

Example 38 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

Class CompactingHashTableTest, method testResize.

@Test
public void testResize() {
    // resizing is only supported by CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);
        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();
        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }
        AbstractHashTableProber<IntPair, IntPair> prober = table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
Also used : Random(java.util.Random) IntPair(org.apache.flink.runtime.operators.testutils.types.IntPair) MemorySegment(org.apache.flink.core.memory.MemorySegment) Test(org.junit.Test)
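
The IntPair-based tests above also assume intPairSerializer and intPairComparator fixtures defined elsewhere in the test class. A hedged sketch of how these fields might be set up with Flink's test-utility types (the initialization shown here is an assumption, not the test's actual code):

// Hypothetical fixture setup assumed by the IntPair-based tests above.
private final TypeSerializer<IntPair> intPairSerializer = new IntPairSerializer();
private final TypeComparator<IntPair> intPairComparator = new IntPairComparator();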

Example 39 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

Class HashTableITCase, method validateSpillingDuringInsertionIntPair.

/*
	 * This test validates a bug fix for a former memory loss in the case where a partition was spilled
	 * during an insert into that same partition.
	 */
@Test
public void validateSpillingDuringInsertionIntPair() throws IOException, MemoryAllocationException {
    final int NUM_BUILD_KEYS = 500000;
    final int NUM_BUILD_VALS = 1;
    final int NUM_PROBE_KEYS = 10;
    final int NUM_PROBE_VALS = 1;
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_BUILD_KEYS, NUM_BUILD_VALS, false);
    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 85);
    } catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }
    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(this.pairBuildSideAccesssor, this.pairProbeSideAccesssor, this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator, memSegments, ioManager);
    join.open(buildInput, new UniformIntPairGenerator(NUM_PROBE_KEYS, NUM_PROBE_VALS, true));
    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;
    int expectedNumResults = (Math.min(NUM_PROBE_KEYS, NUM_BUILD_KEYS) * NUM_BUILD_VALS) * NUM_PROBE_VALS;
    while (join.nextRecord()) {
        MutableObjectIterator<IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);
    join.close();
    this.memManager.release(join.getFreedMemory());
}
Also used : MemoryAllocationException(org.apache.flink.runtime.memory.MemoryAllocationException) IntPair(org.apache.flink.runtime.operators.testutils.types.IntPair) UniformIntPairGenerator(org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator) MemorySegment(org.apache.flink.core.memory.MemorySegment) Test(org.junit.Test)
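
This test relies on memManager and ioManager fixtures created elsewhere in HashTableITCase. A hedged sketch of such a setup; constructor signatures differ across Flink versions, so treat this as an assumption rather than the test's actual code:

// Hypothetical fixture setup assumed by validateSpillingDuringInsertionIntPair.
// Assumes the older two-argument MemoryManager constructor available in this Flink generation.
@Before
public void setup() {
    this.memManager = new MemoryManager(32 * 1024 * 1024, 1);  // 32 MB of managed memory, one slot
    this.ioManager = new IOManagerAsync();
}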

Example 40 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.

Class CompactingHashTable, method insertOrReplaceRecord.

/**
	 * Replaces the record in the hash table if it is already present, or appends the record if not.
	 * May trigger expensive compaction.
	 *
	 * @param record the record to insert or replace
	 * @throws IOException
	 */
public void insertOrReplaceRecord(T record) throws IOException {
    if (this.closed) {
        return;
    }
    final int searchHashCode = MathUtils.jenkinsHash(this.buildSideComparator.hash(record));
    final int posHashCode = searchHashCode % this.numBuckets;
    // get the bucket for the given hash code
    final MemorySegment originalBucket = this.buckets[posHashCode >> this.bucketsPerSegmentBits];
    final int originalBucketOffset = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
    MemorySegment bucket = originalBucket;
    int bucketInSegmentOffset = originalBucketOffset;
    // get the basic characteristics of the bucket
    final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
    final InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
    final MemorySegment[] overflowSegments = partition.overflowSegments;
    this.buildSideComparator.setReference(record);
    int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
    int numInSegment = 0;
    int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
    // loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
    while (true) {
        while (numInSegment < countInSegment) {
            final int thisCode = bucket.getInt(posInSegment);
            posInSegment += HASH_CODE_LEN;
            // check if the hash code matches
            if (thisCode == searchHashCode) {
                // get the pointer to the pair
                final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (numInSegment * POINTER_LEN);
                final long pointer = bucket.getLong(pointerOffset);
                // deserialize the key to check whether it is really equal, or whether we had only a hash collision
                T valueAtPosition = partition.readRecordAt(pointer);
                if (this.buildSideComparator.equalToReference(valueAtPosition)) {
                    long newPointer = insertRecordIntoPartition(record, partition, true);
                    bucket.putLong(pointerOffset, newPointer);
                    return;
                }
            }
            numInSegment++;
        }
        // this segment is done. check if there is another chained bucket
        long newForwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
        if (newForwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
            // nothing found. append and insert
            long pointer = insertRecordIntoPartition(record, partition, false);
            if (countInSegment < NUM_ENTRIES_PER_BUCKET) {
                // we are good in our current bucket, put the values
                // hash code
                bucket.putInt(bucketInSegmentOffset + BUCKET_HEADER_LENGTH + (countInSegment * HASH_CODE_LEN), searchHashCode);
                // pointer
                bucket.putLong(bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (countInSegment * POINTER_LEN), pointer);
                // update count
                bucket.putInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET, countInSegment + 1);
            } else {
                insertBucketEntryFromStart(originalBucket, originalBucketOffset, searchHashCode, pointer, partitionNumber);
            }
            return;
        }
        final int overflowSegNum = (int) (newForwardPointer >>> 32);
        bucket = overflowSegments[overflowSegNum];
        bucketInSegmentOffset = (int) newForwardPointer;
        countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
        posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
        numInSegment = 0;
    }
}
Also used : MemorySegment(org.apache.flink.core.memory.MemorySegment)
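
To illustrate the upsert semantics described in the Javadoc, here is a minimal usage sketch of insertOrReplaceRecord, assuming the same intPairSerializer, intPairComparator, and getMemory fixtures as in the tests above (this snippet is illustrative, not part of the Flink sources):

// Hedged usage sketch: inserting the same key twice keeps a single entry with the latest value.
public void insertOrReplaceExample() throws IOException {
    CompactingHashTable<IntPair> table =
            new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, getMemory(NUM_MEM_PAGES));
    table.open();
    table.insertOrReplaceRecord(new IntPair(42, 1));
    // same key again: the stored value is replaced instead of a second entry being appended
    table.insertOrReplaceRecord(new IntPair(42, 2));
    AbstractHashTableProber<IntPair, IntPair> prober =
            table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
    IntPair target = new IntPair();
    prober.getMatchFor(new IntPair(42, 0), target);
    // target.getValue() is now 2
    table.close();
}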

Aggregations

MemorySegment (org.apache.flink.core.memory.MemorySegment): 161
Test (org.junit.Test): 86
DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable): 38
ArrayList (java.util.ArrayList): 30
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 24
IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair): 24
MemoryAllocationException (org.apache.flink.runtime.memory.MemoryAllocationException): 22
IOException (java.io.IOException): 19
TestData (org.apache.flink.runtime.operators.testutils.TestData): 18
FileIOChannel (org.apache.flink.runtime.io.disk.iomanager.FileIOChannel): 17
UniformIntPairGenerator (org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator): 16
IOManager (org.apache.flink.runtime.io.disk.iomanager.IOManager): 15
IOManagerAsync (org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync): 15
EOFException (java.io.EOFException): 14
AbstractInvokable (org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable): 14
Random (java.util.Random): 11
ChannelReaderInputView (org.apache.flink.runtime.io.disk.iomanager.ChannelReaderInputView): 10
UniformRecordGenerator (org.apache.flink.runtime.operators.testutils.UniformRecordGenerator): 9
Record (org.apache.flink.types.Record): 9
MutableObjectIterator (org.apache.flink.util.MutableObjectIterator): 9