
Example 61 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache, from the class PartitionedFileWriteReadTest, method testNotWriteDataOfTheSameSubpartitionTogether.

@Test(expected = IllegalStateException.class)
public void testNotWriteDataOfTheSameSubpartitionTogether() throws Exception {
    PartitionedFileWriter partitionedFileWriter = createPartitionedFileWriter(2);
    try {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(1024);
        // write a buffer for subpartition 1, then one for subpartition 0
        NetworkBuffer buffer1 = new NetworkBuffer(segment, (buf) -> {});
        partitionedFileWriter.writeBuffers(getBufferWithChannels(buffer1, 1));
        NetworkBuffer buffer2 = new NetworkBuffer(segment, (buf) -> {});
        partitionedFileWriter.writeBuffers(getBufferWithChannels(buffer2, 0));
        // writing for subpartition 1 again after moving on to subpartition 0 must fail:
        // all buffers of one subpartition have to be written together
        NetworkBuffer buffer3 = new NetworkBuffer(segment, (buf) -> {});
        partitionedFileWriter.writeBuffers(getBufferWithChannels(buffer3, 1));
    } finally {
        partitionedFileWriter.finish();
    }
}
Also used: NetworkBuffer (org.apache.flink.runtime.io.network.buffer.NetworkBuffer), MemorySegment (org.apache.flink.core.memory.MemorySegment), Test (org.junit.Test)
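
The no-argument recycler lambda in the test stands in for a real BufferRecycler. As a rough, standalone sketch of the same allocate, wrap, and release pattern (not part of PartitionedFileWriteReadTest; it assumes FreeingBufferRecycler from the same buffer package, which frees the segment when the buffer is recycled), wrapping a MemorySegment into a NetworkBuffer could look like this:

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;
import org.apache.flink.runtime.io.network.buffer.NetworkBuffer;

public class NetworkBufferSketch {

    public static void main(String[] args) {
        // allocate an unpooled, heap-backed segment of 1 KiB
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(1024);

        // write a small payload directly into the segment at offset 0
        segment.putInt(0, 42);

        // wrap the segment into a NetworkBuffer; FreeingBufferRecycler frees the
        // segment once the buffer is recycled (the test uses a no-op lambda instead)
        NetworkBuffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
        buffer.setSize(Integer.BYTES);

        System.out.println("buffer size: " + buffer.getSize());

        // drop the last reference; the recycler releases the underlying segment
        buffer.recycleBuffer();
    }
}

In the test itself a no-op recycler is enough, because the segments only back dummy buffers whose contents are never read back.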

Example 62 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache, from the class FixedLengthRecordSorterTest, method testWriteAndRead.

@Test
public void testWriteAndRead() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
    FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
    RandomIntPairGenerator generator = new RandomIntPairGenerator(SEED);
    // write records until the sort buffer is full (or the safety cap is hit)
    IntPair record = new IntPair();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record) && num < 3354624);
    // System.out.println("WRITE TIME " + (System.currentTimeMillis() - startTime));
    // re-read the records
    generator.reset();
    IntPair readTarget = new IntPair();
    // startTime = System.currentTimeMillis();
    int i = 0;
    while (i < num) {
        generator.next(record);
        readTarget = sorter.getRecord(readTarget, i++);
        int rk = readTarget.getKey();
        int gk = record.getKey();
        int rv = readTarget.getValue();
        int gv = record.getValue();
        Assert.assertEquals("The re-read key is wrong at position " + i, gk, rk);
        Assert.assertEquals("The re-read value is wrong at position " + i, gv, rv);
    }
    // System.out.println("READ TIME " + (System.currentTimeMillis() - startTime));
    // System.out.println("RECORDS " + num);
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
Also used: DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable), RandomIntPairGenerator (org.apache.flink.runtime.operators.testutils.RandomIntPairGenerator), IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair), MemorySegment (org.apache.flink.core.memory.MemorySegment), Test (org.junit.Test)
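
The test relies on two fixtures that are not shown here: the memoryManager field and the newSortBuffer(memory) helper. A plausible, hedged sketch of how they might be wired (the MEMORY_SIZE and MEMORY_PAGE_SIZE values below are placeholders, and the actual setup in FixedLengthRecordSorterTest may differ) is:

import java.util.List;

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.memory.MemoryManager;
import org.apache.flink.runtime.memory.MemoryManagerBuilder;
import org.apache.flink.runtime.operators.sort.FixedLengthRecordSorter;
import org.apache.flink.runtime.operators.testutils.DummyInvokable;
import org.apache.flink.runtime.operators.testutils.types.IntPair;
import org.apache.flink.runtime.operators.testutils.types.IntPairComparator;
import org.apache.flink.runtime.operators.testutils.types.IntPairSerializer;

public class SorterFixtureSketch {

    // placeholder sizes mirroring the MEMORY_SIZE / MEMORY_PAGE_SIZE constants of the test
    private static final int MEMORY_SIZE = 64 * 1024 * 1024;
    private static final int MEMORY_PAGE_SIZE = 32 * 1024;

    public static void main(String[] args) throws Exception {
        MemoryManager memoryManager =
                MemoryManagerBuilder.newBuilder()
                        .setMemorySize(MEMORY_SIZE)
                        .setPageSize(MEMORY_PAGE_SIZE)
                        .build();

        // hand a fixed number of pages to the sorter, owned by a dummy task
        List<MemorySegment> memory =
                memoryManager.allocatePages(new DummyInvokable(), MEMORY_SIZE / MEMORY_PAGE_SIZE);

        // a plausible body for newSortBuffer(memory): fixed-length IntPair records
        FixedLengthRecordSorter<IntPair> sorter =
                new FixedLengthRecordSorter<>(
                        new IntPairSerializer(), new IntPairComparator(), memory);

        sorter.write(new IntPair(1, 2));

        // release everything again, as the tests do in their final lines
        sorter.dispose();
        memoryManager.release(memory);
    }
}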

Example 63 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache, from the class FixedLengthRecordSorterTest, method testReset.

@Test
public void testReset() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
    FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
    RandomIntPairGenerator generator = new RandomIntPairGenerator(SEED);
    // write the buffer full with the first set of records
    IntPair record = new IntPair();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record) && num < 3354624);
    sorter.reset();
    // write a second sequence of the same records; since the records have a fixed
    // length, the same number must fit into the buffer after the reset
    generator.reset();
    int num2 = -1;
    do {
        generator.next(record);
        num2++;
    } while (sorter.write(record) && num2 < 3354624);
    Assert.assertEquals("The number of records written after the reset was not the same as before.", num, num2);
    // re-read the records
    generator.reset();
    IntPair readTarget = new IntPair();
    int i = 0;
    while (i < num) {
        generator.next(record);
        readTarget = sorter.getRecord(readTarget, i++);
        int rk = readTarget.getKey();
        int gk = record.getKey();
        int rv = readTarget.getValue();
        int gv = record.getValue();
        Assert.assertEquals("The re-read key is wrong", gk, rk);
        Assert.assertEquals("The re-read value is wrong", gv, rv);
    }
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
Also used: DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable), RandomIntPairGenerator (org.apache.flink.runtime.operators.testutils.RandomIntPairGenerator), IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair), MemorySegment (org.apache.flink.core.memory.MemorySegment), Test (org.junit.Test)
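
testWriteAndRead, testReset, and testSwap all repeat the same do/while fill loop. A hypothetical helper (not part of the test class) that captures the idiom, writing records until the sort buffer rejects one and returning the count of accepted records, might look like:

import java.io.IOException;

import org.apache.flink.runtime.operators.sort.FixedLengthRecordSorter;
import org.apache.flink.runtime.operators.testutils.RandomIntPairGenerator;
import org.apache.flink.runtime.operators.testutils.types.IntPair;

// hypothetical helper, not part of FixedLengthRecordSorterTest
final class SorterTestUtil {

    // write records from the generator until the sort buffer rejects one,
    // and return how many records were accepted (the tests' "num")
    static int fillSorter(FixedLengthRecordSorter<IntPair> sorter, RandomIntPairGenerator generator)
            throws IOException {
        IntPair record = new IntPair();
        int accepted = 0;
        generator.next(record);
        while (sorter.write(record)) {
            accepted++;
            generator.next(record);
        }
        return accepted;
    }

    private SorterTestUtil() {}
}

The tests additionally cap the loop at 3,354,624 records as a safety net in case the buffer never fills up; the helper above omits that cap for brevity.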

Example 64 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache, from the class FixedLengthRecordSorterTest, method testSwap.

/**
 * The swap test fills the sort buffer and then swaps all elements so that they end up in
 * reverse order. It then resets the generator, walks backwards through the buffer, and
 * compares the re-read records against the generated ones for equality.
 */
@Test
public void testSwap() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
    FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
    RandomIntPairGenerator generator = new RandomIntPairGenerator(SEED);
    // write the records
    IntPair record = new IntPair();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record) && num < 3354624);
    // reverse the order by swapping elements pairwise from both ends
    int start = 0, end = num - 1;
    while (start < end) {
        sorter.swap(start++, end--);
    }
    // re-read the records
    generator.reset();
    IntPair readTarget = new IntPair();
    int i = num - 1;
    while (i >= 0) {
        generator.next(record);
        readTarget = sorter.getRecord(readTarget, i--);
        int rk = readTarget.getKey();
        int gk = record.getKey();
        int rv = readTarget.getValue();
        int gv = record.getValue();
        Assert.assertEquals("The re-read key is wrong", gk, rk);
        Assert.assertEquals("The re-read value is wrong", gv, rv);
    }
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
Also used: DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable), RandomIntPairGenerator (org.apache.flink.runtime.operators.testutils.RandomIntPairGenerator), IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair), MemorySegment (org.apache.flink.core.memory.MemorySegment), Test (org.junit.Test)
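
swap() and compare() exist so that an external IndexedSorter can reorder the buffer in place without moving whole records through user code. As a rough sketch building on the tests above (it assumes a FixedLengthRecordSorter that has already been filled, for example with the hypothetical fillSorter helper), sorting with Flink's QuickSort and draining the result could look like:

import java.io.IOException;

import org.apache.flink.runtime.operators.sort.FixedLengthRecordSorter;
import org.apache.flink.runtime.operators.sort.QuickSort;
import org.apache.flink.runtime.operators.testutils.types.IntPair;
import org.apache.flink.util.MutableObjectIterator;

// hypothetical sketch, not part of FixedLengthRecordSorterTest
final class SortAndDrainSketch {

    // sort the filled buffer in place, then read the records back in key order
    static void sortAndPrint(FixedLengthRecordSorter<IntPair> sorter) throws IOException {
        // compare()/swap() from the IndexedSortable contract drive the in-place sort
        new QuickSort().sort(sorter);

        MutableObjectIterator<IntPair> iter = sorter.getIterator();
        IntPair reuse = new IntPair();
        while ((reuse = iter.next(reuse)) != null) {
            System.out.println(reuse.getKey() + " -> " + reuse.getValue());
        }
    }

    private SortAndDrainSketch() {}
}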

Example 65 with MemorySegment

Use of org.apache.flink.core.memory.MemorySegment in project flink by apache, from the class FixedLengthRecordSorterTest, method testWriteAndIterator.

@Test
public void testWriteAndIterator() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
    FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
    RandomIntPairGenerator generator = new RandomIntPairGenerator(SEED);
    // write the records
    IntPair record = new IntPair();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record));
    // re-read the records
    generator.reset();
    MutableObjectIterator<IntPair> iter = sorter.getIterator();
    IntPair readTarget = new IntPair();
    int count = 0;
    while ((readTarget = iter.next(readTarget)) != null) {
        count++;
        generator.next(record);
        int rk = readTarget.getKey();
        int gk = record.getKey();
        int rv = readTarget.getValue();
        int gv = record.getValue();
        Assert.assertEquals("The re-read key is wrong", gk, rk);
        Assert.assertEquals("The re-read value is wrong", gv, rv);
    }
    Assert.assertEquals("Incorrect number of records", num, count);
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
Also used: DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable), RandomIntPairGenerator (org.apache.flink.runtime.operators.testutils.RandomIntPairGenerator), IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair), MemorySegment (org.apache.flink.core.memory.MemorySegment), Test (org.junit.Test)
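
All five examples ultimately operate on MemorySegment pages, whether handed out by the MemoryManager or allocated directly. For reference, a minimal standalone sketch of the segment's own offset-based accessor API, independent of any sorter or network buffer:

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class MemorySegmentBasics {

    public static void main(String[] args) {
        // heap-backed segment wrapping an existing byte array, outside of any MemoryManager
        MemorySegment segment = MemorySegmentFactory.wrap(new byte[64]);

        // absolute, offset-based accessors; there is no internal read/write position
        segment.putInt(0, 7);
        segment.putLong(4, 42L);

        System.out.println(segment.getInt(0));   // 7
        System.out.println(segment.getLong(4));  // 42
        System.out.println(segment.size());      // 64
    }
}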

Aggregations

MemorySegment (org.apache.flink.core.memory.MemorySegment): 375 usages
Test (org.junit.Test): 136 usages
ArrayList (java.util.ArrayList): 52 usages
DummyInvokable (org.apache.flink.runtime.operators.testutils.DummyInvokable): 44 usages
IOException (java.io.IOException): 37 usages
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 29 usages
Buffer (org.apache.flink.runtime.io.network.buffer.Buffer): 26 usages
NetworkBuffer (org.apache.flink.runtime.io.network.buffer.NetworkBuffer): 25 usages
MemoryAllocationException (org.apache.flink.runtime.memory.MemoryAllocationException): 24 usages
IntPair (org.apache.flink.runtime.operators.testutils.types.IntPair): 24 usages
FileIOChannel (org.apache.flink.runtime.io.disk.iomanager.FileIOChannel): 20 usages
EOFException (java.io.EOFException): 18 usages
ByteBuffer (java.nio.ByteBuffer): 18 usages
AbstractInvokable (org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable): 18 usages
TestData (org.apache.flink.runtime.operators.testutils.TestData): 18 usages
Random (java.util.Random): 16 usages
UniformIntPairGenerator (org.apache.flink.runtime.operators.testutils.UniformIntPairGenerator): 16 usages
Chunk (org.apache.flink.runtime.state.heap.space.Chunk): 15 usages
BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData): 15 usages
IOManagerAsync (org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync): 14 usages