Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
Class FixedLengthRecordSorterTest, method testFlushPartialMemoryPage.
@Test
public void testFlushPartialMemoryPage() throws Exception {
    // Insert IntPair which would fill 2 memory pages.
    final int NUM_RECORDS = 2 * MEMORY_PAGE_SIZE / 8;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), 3);
    FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
    UniformIntPairGenerator generator = new UniformIntPairGenerator(Integer.MAX_VALUE, 1, false);
    // write the records
    IntPair record = new IntPair();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record) && num < NUM_RECORDS);
    FileIOChannel.ID channelID = this.ioManager.createChannelEnumerator().next();
    BlockChannelWriter<MemorySegment> blockChannelWriter = this.ioManager.createBlockChannelWriter(channelID);
    final List<MemorySegment> writeBuffer = this.memoryManager.allocatePages(new DummyInvokable(), 3);
    ChannelWriterOutputView outputView = new ChannelWriterOutputView(blockChannelWriter, writeBuffer, writeBuffer.get(0).size());
    sorter.writeToOutput(outputView, 1, NUM_RECORDS - 1);
    this.memoryManager.release(outputView.close());
    BlockChannelReader<MemorySegment> blockChannelReader = this.ioManager.createBlockChannelReader(channelID);
    final List<MemorySegment> readBuffer = this.memoryManager.allocatePages(new DummyInvokable(), 3);
    ChannelReaderInputView readerInputView = new ChannelReaderInputView(blockChannelReader, readBuffer, false);
    final List<MemorySegment> dataBuffer = this.memoryManager.allocatePages(new DummyInvokable(), 3);
    ChannelReaderInputViewIterator<IntPair> iterator = new ChannelReaderInputViewIterator<IntPair>(readerInputView, dataBuffer, this.serializer);
    record = iterator.next(record);
    int i = 1;
    while (record != null) {
        Assert.assertEquals(i, record.getKey());
        record = iterator.next(record);
        i++;
    }
    Assert.assertEquals(NUM_RECORDS, i);
    this.memoryManager.release(dataBuffer);
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
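
The test above references fields and constants (memoryManager, ioManager, serializer, MEMORY_PAGE_SIZE) and a newSortBuffer helper that live elsewhere in the test class and are not part of this excerpt. Below is a minimal sketch of what such a fixture could look like, following the MemoryManagerBuilder and IOManagerAsync patterns used further down this page and assuming the IntPairSerializer and IntPairComparator test types from Flink's runtime test utilities; the actual Flink test class may be wired differently.

// hypothetical fixture sketch; example sizes, not necessarily those of the real test
private static final int MEMORY_SIZE = 1024 * 1024 * 64;
private static final int MEMORY_PAGE_SIZE = 32 * 1024;

private MemoryManager memoryManager;
private IOManager ioManager;
private TypeSerializer<IntPair> serializer;
private TypeComparator<IntPair> comparator;

@Before
public void setUp() {
    // a MemoryManager with a fixed page size and an async I/O manager, as in the other examples
    this.memoryManager = MemoryManagerBuilder.newBuilder()
            .setMemorySize(MEMORY_SIZE)
            .setPageSize(MEMORY_PAGE_SIZE)
            .build();
    this.ioManager = new IOManagerAsync();
    this.serializer = new IntPairSerializer();
    this.comparator = new IntPairComparator();
}

private FixedLengthRecordSorter<IntPair> newSortBuffer(List<MemorySegment> memory) {
    // the sorter is built from a serializer, a comparator, and the memory segments to sort in
    return new FixedLengthRecordSorter<>(this.serializer, this.comparator, memory);
}

@After
public void tearDown() throws Exception {
    this.ioManager.close();
    this.memoryManager.shutdown();
}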
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
Class LargeRecordHandlerTest, method testRecordHandlerCompositeKey.
@Test
public void testRecordHandlerCompositeKey() {
    final int PAGE_SIZE = 4 * 1024;
    final int NUM_PAGES = 24;
    final int NUM_RECORDS = 25000;
    try (final IOManager ioMan = new IOManagerAsync()) {
        final MemoryManager memMan = MemoryManagerBuilder.newBuilder().setMemorySize(NUM_PAGES * PAGE_SIZE).setPageSize(PAGE_SIZE).build();
        final AbstractInvokable owner = new DummyInvokable();
        final List<MemorySegment> initialMemory = memMan.allocatePages(owner, 6);
        final List<MemorySegment> sortMemory = memMan.allocatePages(owner, NUM_PAGES - 6);
        final TupleTypeInfo<Tuple3<Long, String, Byte>> typeInfo = (TupleTypeInfo<Tuple3<Long, String, Byte>>) TypeInformation.of(new TypeHint<Tuple3<Long, String, Byte>>() {});
        final TypeSerializer<Tuple3<Long, String, Byte>> serializer = typeInfo.createSerializer(new ExecutionConfig());
        final TypeComparator<Tuple3<Long, String, Byte>> comparator = typeInfo.createComparator(new int[] { 2, 0 }, new boolean[] { true, true }, 0, new ExecutionConfig());
        LargeRecordHandler<Tuple3<Long, String, Byte>> handler = new LargeRecordHandler<Tuple3<Long, String, Byte>>(serializer, comparator, ioMan, memMan, initialMemory, owner, 128, owner.getExecutionConfig());
        assertFalse(handler.hasData());
        // add the test data
        Random rnd = new Random();
        for (int i = 0; i < NUM_RECORDS; i++) {
            long val = rnd.nextLong();
            handler.addRecord(new Tuple3<Long, String, Byte>(val, String.valueOf(val), (byte) val));
            assertTrue(handler.hasData());
        }
        MutableObjectIterator<Tuple3<Long, String, Byte>> sorted = handler.finishWriteAndSortKeys(sortMemory);
        try {
            handler.addRecord(new Tuple3<Long, String, Byte>(92L, "peter pepper", (byte) 1));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        Tuple3<Long, String, Byte> previous = null;
        Tuple3<Long, String, Byte> next;
        while ((next = sorted.next(null)) != null) {
            // key and value must be equal
            assertTrue(next.f0.equals(Long.parseLong(next.f1)));
            assertTrue(next.f0.byteValue() == next.f2);
            // order must be correct
            if (previous != null) {
                assertTrue(previous.f2 <= next.f2);
                assertTrue(previous.f2.byteValue() != next.f2.byteValue() || previous.f0 <= next.f0);
            }
            previous = next;
        }
        handler.close();
        assertFalse(handler.hasData());
        handler.close();
        try {
            handler.addRecord(new Tuple3<Long, String, Byte>(92L, "peter pepper", (byte) 1));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        assertTrue(memMan.verifyEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
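
The int[] { 2, 0 } passed to createComparator selects the composite sort key: field 2 (the Byte) is the primary key and field 0 (the Long) the secondary key, both ascending, which is exactly what the order assertions inside the read loop verify. As a plain-Java illustration of that ordering (not part of the Flink test), the expected order corresponds to:

// hypothetical equivalent of the composite key order checked above
Comparator<Tuple3<Long, String, Byte>> expectedOrder =
        Comparator.comparing((Tuple3<Long, String, Byte> t) -> t.f2)
                .thenComparingLong(t -> t.f0);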
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
Class NormalizedKeySorterTest, method testWriteAndIterator.
@Test
public void testWriteAndIterator() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
    NormalizedKeySorter<Tuple2<Integer, String>> sorter = newSortBuffer(memory);
    TestData.TupleGenerator generator = new TestData.TupleGenerator(SEED, KEY_MAX, VALUE_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
    // write the records
    Tuple2<Integer, String> record = new Tuple2<>();
    do {
        generator.next(record);
    } while (sorter.write(record));
    // re-read the records
    generator.reset();
    MutableObjectIterator<Tuple2<Integer, String>> iter = sorter.getIterator();
    Tuple2<Integer, String> readTarget = new Tuple2<>();
    while ((readTarget = iter.next(readTarget)) != null) {
        generator.next(record);
        int rk = readTarget.f0;
        int gk = record.f0;
        String rv = readTarget.f1;
        String gv = record.f1;
        Assert.assertEquals("The re-read key is wrong", gk, rk);
        Assert.assertEquals("The re-read value is wrong", gv, rv);
    }
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
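
As in the FixedLengthRecordSorter example, newSortBuffer is a helper of the test class that is not shown in this excerpt. A minimal sketch of such a helper, deriving the serializer and comparator through TypeInformation the same way the LargeRecordHandlerTest example above does (key on field 0, ascending); the real helper in Flink's NormalizedKeySorterTest may be wired differently:

private NormalizedKeySorter<Tuple2<Integer, String>> newSortBuffer(List<MemorySegment> memory) {
    // hypothetical helper: build serializer and comparator for Tuple2<Integer, String>
    TupleTypeInfo<Tuple2<Integer, String>> typeInfo =
            (TupleTypeInfo<Tuple2<Integer, String>>) TypeInformation.of(new TypeHint<Tuple2<Integer, String>>() {});
    TypeSerializer<Tuple2<Integer, String>> serializer = typeInfo.createSerializer(new ExecutionConfig());
    TypeComparator<Tuple2<Integer, String>> comparator =
            typeInfo.createComparator(new int[] { 0 }, new boolean[] { true }, 0, new ExecutionConfig());
    return new NormalizedKeySorter<>(serializer, comparator, memory);
}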
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
Class NormalizedKeySorterTest, method testSwap.
/**
 * The swap test fills the sort buffer and swaps all elements so that they end up in reverse order.
 * It then resets the generator, goes backwards through the buffer, and compares for equality.
 */
@Test
public void testSwap() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
    NormalizedKeySorter<Tuple2<Integer, String>> sorter = newSortBuffer(memory);
    TestData.TupleGenerator generator = new TestData.TupleGenerator(SEED, KEY_MAX, VALUE_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
    // write the records
    Tuple2<Integer, String> record = new Tuple2<>();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record));
    // swap the records
    int start = 0, end = num - 1;
    while (start < end) {
        sorter.swap(start++, end--);
    }
    // re-read the records
    generator.reset();
    Tuple2<Integer, String> readTarget = new Tuple2<>();
    int i = num - 1;
    while (i >= 0) {
        generator.next(record);
        readTarget = sorter.getRecord(readTarget, i--);
        int rk = readTarget.f0;
        int gk = record.f0;
        String rv = readTarget.f1;
        String gv = record.f1;
        Assert.assertEquals("The re-read key is wrong", gk, rk);
        Assert.assertEquals("The re-read value is wrong", gv, rv);
    }
    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
Class LargeRecordHandlerITCase, method fileTest.
@Test
public void fileTest() {
    final int PAGE_SIZE = 4 * 1024;
    final int NUM_PAGES = 4;
    final int NUM_RECORDS = 10;
    FileIOChannel.ID channel = null;
    try (final IOManager ioMan = new IOManagerAsync()) {
        final MemoryManager memMan = MemoryManagerBuilder.newBuilder().setMemorySize(NUM_PAGES * PAGE_SIZE).setPageSize(PAGE_SIZE).build();
        final AbstractInvokable owner = new DummyInvokable();
        final List<MemorySegment> memory = memMan.allocatePages(owner, NUM_PAGES);
        final TypeInformation<?>[] types = new TypeInformation<?>[] { BasicTypeInfo.LONG_TYPE_INFO, new ValueTypeInfo<SomeVeryLongValue>(SomeVeryLongValue.class), BasicTypeInfo.BYTE_TYPE_INFO };
        final TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>> typeInfo = new TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>>(types);
        final TypeSerializer<Tuple3<Long, SomeVeryLongValue, Byte>> serializer = typeInfo.createSerializer(new ExecutionConfig());
        channel = ioMan.createChannel();
        FileChannelOutputView out = new FileChannelOutputView(ioMan.createBlockChannelWriter(channel), memMan, memory, PAGE_SIZE);
        // add the test data
        Random rnd = new Random();
        List<Long> offsets = new ArrayList<Long>();
        for (int i = 0; i < NUM_RECORDS; i++) {
            offsets.add(out.getWriteOffset());
            long val = rnd.nextLong();
            Tuple3<Long, SomeVeryLongValue, Byte> next = new Tuple3<Long, SomeVeryLongValue, Byte>(val, new SomeVeryLongValue((int) val), (byte) val);
            serializer.serialize(next, out);
        }
        out.close();
        for (int i = 1; i < offsets.size(); i++) {
            assertTrue(offsets.get(i) > offsets.get(i - 1));
        }
        memMan.allocatePages(owner, memory, NUM_PAGES);
        SeekableFileChannelInputView in = new SeekableFileChannelInputView(ioMan, channel, memMan, memory, out.getBytesInLatestSegment());
        for (int i = 0; i < NUM_RECORDS; i++) {
            in.seek(offsets.get(i));
            Tuple3<Long, SomeVeryLongValue, Byte> next = serializer.deserialize(in);
            // key and value must be equal
            assertTrue(next.f0.intValue() == next.f1.val());
            assertTrue(next.f0.byteValue() == next.f2);
        }
        in.closeAndDelete();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
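
All of the examples above obtain their MemorySegments from a MemoryManager, which pools pages and tracks which task owns them. For completeness, here is a minimal sketch of allocating and using a segment directly through MemorySegmentFactory, independent of the tests above:

// allocate an unpooled 4 KiB heap segment, write and read values, then free it
MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(4 * 1024);
segment.putInt(0, 42);            // write an int at byte offset 0
segment.putLong(4, 123456789L);   // write a long at byte offset 4
int intValue = segment.getInt(0);
long longValue = segment.getLong(4);
System.out.println(intValue + " / " + longValue + " / size=" + segment.size());
segment.free();                   // release the underlying memory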